hadoop的WordCount例子
2014-04-24 22:47
239 查看
package cn.lmj.mapreduce;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
/**
 * Classic WordCount example written against the old Hadoop MapReduce API
 * (org.apache.hadoop.mapred). Reads text from a fixed HDFS input path,
 * counts occurrences of each whitespace-separated token, and writes
 * (word, count) pairs to a fixed output directory.
 */
public class WordCount
{
    /** Mapper: emits (token, 1) for every space-separated token in an input line. */
    public static class WordCountMapper extends MapReduceBase implements Mapper<LongWritable,Text,Text,LongWritable>
    {
        // Reused across map() calls to avoid allocating per record.
        private final LongWritable count = new LongWritable(1);
        private final Text content = new Text();

        /**
         * @param key    byte offset of the line within the input file (unused)
         * @param value  one line of input text
         * @param output collector receiving (word, 1) pairs
         * @param report progress reporter (unused)
         */
        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<Text, LongWritable> output, Reporter report)
                throws IOException
        {
            // Tokenize on single spaces; consecutive spaces yield empty
            // tokens, which we skip so "" is never counted as a word.
            String[] tokens = value.toString().split(" ");
            for (String token : tokens)
            {
                if (token.isEmpty())
                {
                    continue;
                }
                content.set(token);
                output.collect(content, count);
            }
        }
    }

    /** Reducer (also used as combiner): sums all counts for a given word. */
    public static class WordCountReduce extends MapReduceBase implements Reducer<Text,LongWritable,Text,LongWritable>
    {
        /**
         * @param key    the word
         * @param values partial counts for this word
         * @param output collector receiving the (word, totalCount) pair
         * @param rep    progress reporter (unused)
         */
        @Override
        public void reduce(Text key, Iterator<LongWritable> values,
                OutputCollector<Text, LongWritable> output, Reporter rep)
                throws IOException
        {
            // Accumulate all partial counts for this key.
            long sum = 0;
            while (values.hasNext())
            {
                sum += values.next().get();
            }
            output.collect(key, new LongWritable(sum));
        }
    }

    /** Configures and launches the WordCount job. */
    public static void main(String[] args) throws Exception
    {
        // BUGFIX: was WordCount2.class, which does not exist in this file;
        // the job jar must be located via this class.
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("lmj");
        // Output key/value types produced by the reducer.
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
        // Map, combine, and reduce classes. The reducer is safe to use as a
        // combiner because summation is associative and commutative.
        conf.setMapperClass(WordCountMapper.class);
        conf.setCombinerClass(WordCountReduce.class);
        conf.setReducerClass(WordCountReduce.class);
        // Plain-text input and output formats.
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        // Hard-coded HDFS input file and output directory.
        FileInputFormat.setInputPaths(conf, new Path("/aaa/hadoop.txt"));
        FileOutputFormat.setOutputPath(conf, new Path("/aaa/output"));
        // Submit the job and block until completion.
        JobClient.runJob(conf);
    }
}
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
/**
 * Classic WordCount example written against the old Hadoop MapReduce API
 * (org.apache.hadoop.mapred). Reads text from a fixed HDFS input path,
 * counts occurrences of each whitespace-separated token, and writes
 * (word, count) pairs to a fixed output directory.
 */
public class WordCount
{
    /** Mapper: emits (token, 1) for every space-separated token in an input line. */
    public static class WordCountMapper extends MapReduceBase implements Mapper<LongWritable,Text,Text,LongWritable>
    {
        // Reused across map() calls to avoid allocating per record.
        private final LongWritable count = new LongWritable(1);
        private final Text content = new Text();

        /**
         * @param key    byte offset of the line within the input file (unused)
         * @param value  one line of input text
         * @param output collector receiving (word, 1) pairs
         * @param report progress reporter (unused)
         */
        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<Text, LongWritable> output, Reporter report)
                throws IOException
        {
            // Tokenize on single spaces; consecutive spaces yield empty
            // tokens, which we skip so "" is never counted as a word.
            String[] tokens = value.toString().split(" ");
            for (String token : tokens)
            {
                if (token.isEmpty())
                {
                    continue;
                }
                content.set(token);
                output.collect(content, count);
            }
        }
    }

    /** Reducer (also used as combiner): sums all counts for a given word. */
    public static class WordCountReduce extends MapReduceBase implements Reducer<Text,LongWritable,Text,LongWritable>
    {
        /**
         * @param key    the word
         * @param values partial counts for this word
         * @param output collector receiving the (word, totalCount) pair
         * @param rep    progress reporter (unused)
         */
        @Override
        public void reduce(Text key, Iterator<LongWritable> values,
                OutputCollector<Text, LongWritable> output, Reporter rep)
                throws IOException
        {
            // Accumulate all partial counts for this key.
            long sum = 0;
            while (values.hasNext())
            {
                sum += values.next().get();
            }
            output.collect(key, new LongWritable(sum));
        }
    }

    /** Configures and launches the WordCount job. */
    public static void main(String[] args) throws Exception
    {
        // BUGFIX: was WordCount2.class, which does not exist in this file;
        // the job jar must be located via this class.
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("lmj");
        // Output key/value types produced by the reducer.
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
        // Map, combine, and reduce classes. The reducer is safe to use as a
        // combiner because summation is associative and commutative.
        conf.setMapperClass(WordCountMapper.class);
        conf.setCombinerClass(WordCountReduce.class);
        conf.setReducerClass(WordCountReduce.class);
        // Plain-text input and output formats.
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        // Hard-coded HDFS input file and output directory.
        FileInputFormat.setInputPaths(conf, new Path("/aaa/hadoop.txt"));
        FileOutputFormat.setOutputPath(conf, new Path("/aaa/output"));
        // Submit the job and block until completion.
        JobClient.runJob(conf);
    }
}
相关文章推荐
- eclipse中运行Hadoop2.6.0的WordCount例子
- Hadoop的测试例子WordCount(含效果图)
- Hadoop AWS Word Count 例子
- hadoop2.7.3 使用wordcount的一个例子
- Hadoop的测试例子WordCount(含效果图)
- hadoop学习之HDFS(2.6):wordcount例子代码详细解读
- centos6.5配置Hadoop环境,运行wordcount例子
- 学习Hadoop MapReduce与WordCount例子分析
- 分布式环境搭建redhat7+hadoop2.6.1+jdk1.8+WordCount成功运行例子
- Hadoop测试例子wordcount
- 分析Hadoop自带WordCount例子的执行过程(1)
- 运行hadoop自带的wordcount例子
- Hadoop伪分布式运行wordcount例子
- Hadoop1.x 的MapReduce 简单例子WordCount
- hadoop第一个例子wordcount学习
- hadoop学习之HDFS(2.5):windows下eclipse远程连接linux下的hadoop集群并测试wordcount例子
- 关于hadoop1.0.4运行自带例子WordCount内存溢出问题。
- hadoop 2.7.4 下运行WordCount例子笔记
- [Linux][Hadoop] 运行WordCount例子
- Hadoop例子中WordCount参数分析