WordCount代码
2017-10-16 19:20
148 查看
package com.zhiyou.bd17.mr;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCount {
    /**
     * Mapper: for each input line, emits one (word, 1) pair per
     * whitespace-delimited token.
     */
    public static class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable>{
        private String[] infos;
        // Reused output key/value objects to avoid per-record allocation.
        private Text oKey = new Text();
        private IntWritable oValue = new IntWritable(1);

        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            // Split the line into words on RUNS of whitespace ("\\s+").
            // The original "\\s" split on a single whitespace char, so
            // consecutive spaces/tabs produced empty tokens that were
            // counted as words.
            infos = value.toString().split("\\s+");
            for (String word : infos) {
                // A line with leading whitespace still yields one empty
                // leading token even with "\\s+" — skip it.
                if (word.isEmpty()) {
                    continue;
                }
                // Emit (word, 1) to the shuffle.
                oKey.set(word);
                context.write(oKey, oValue);
            }
        }
    }

    /**
     * Reducer: sums the 1s for each word and emits (word, total count).
     * Also usable as a combiner because the operation is associative
     * and commutative.
     */
    public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private int sum;
        private IntWritable oValue = new IntWritable(0);

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
            // Reset per key — the field is reused across reduce() calls.
            sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            // Emit (word, count).
            oValue.set(sum);
            context.write(key, oValue);
        }
    }

    /**
     * Assembles the job and submits it to the MapReduce engine.
     * Exits with 0 on success, 1 on failure.
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException{
        // Configuration picks up HDFS location and MR parameters from
        // the classpath (core-site.xml etc.).
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        job.setJarByClass(WordCount.class);
        job.setJobName("第一个mr作业:wordcount");
        job.setMapperClass(WordCountMap.class);
        // Optional combiner — the reducer is safe to reuse here.
        // job.setCombinerClass(WordCountReducer.class);
        job.setReducerClass(WordCountReducer.class);
        // Map output kv types may be omitted because they match the
        // reducer's output types below.
        // job.setMapOutputKeyClass(Text.class);
        // job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Input data to process.
        Path inputPath = new Path("/README.txt");
        FileInputFormat.addInputPath(job, inputPath);
        // Output location; delete it first — the job fails if it exists.
        Path outputPath = new Path("/bd17/output/wordcount1");
        outputPath.getFileSystem(configuration).delete(outputPath, true);
        FileOutputFormat.setOutputPath(job, outputPath);
        // Submit and block until completion.
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
执行结果:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCount {
    /**
     * Mapper: tokenizes each input line on whitespace and emits
     * (word, 1) for every token.
     */
    public static class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable>{
        private String[] infos;
        // Output key/value objects reused across map() calls.
        private Text oKey = new Text();
        private IntWritable oValue = new IntWritable(1);

        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            // Use "\\s+" so runs of whitespace collapse into one
            // delimiter; the original "\\s" produced empty tokens
            // between consecutive spaces/tabs and counted them as words.
            infos = value.toString().split("\\s+");
            for (String word : infos) {
                // Skip the empty leading token that appears when a
                // line starts with whitespace.
                if (word.isEmpty()) {
                    continue;
                }
                oKey.set(word);
                context.write(oKey, oValue);
            }
        }
    }

    /**
     * Reducer: totals the counts for each word and emits
     * (word, total). Safe to also use as a combiner.
     */
    public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private int sum;
        private IntWritable oValue = new IntWritable(0);

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
            // Field is shared across keys — must reset each call.
            sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            oValue.set(sum);
            context.write(key, oValue);
        }
    }

    /**
     * Builds the WordCount job and submits it to the MR engine.
     * Exit code 0 on success, 1 on failure.
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException{
        // Reads HDFS / MR settings from the site configuration files.
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        job.setJarByClass(WordCount.class);
        job.setJobName("第一个mr作业:wordcount");
        job.setMapperClass(WordCountMap.class);
        // Optional combiner (the reducer works as one).
        // job.setCombinerClass(WordCountReducer.class);
        job.setReducerClass(WordCountReducer.class);
        // Map output types default to the job output types set below,
        // so these are not needed.
        // job.setMapOutputKeyClass(Text.class);
        // job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Source data to process.
        Path inputPath = new Path("/README.txt");
        FileInputFormat.addInputPath(job, inputPath);
        // Target directory; remove any stale copy first because the
        // job aborts if the output path already exists.
        Path outputPath = new Path("/bd17/output/wordcount1");
        outputPath.getFileSystem(configuration).delete(outputPath, true);
        FileOutputFormat.setOutputPath(job, outputPath);
        // Submit the job and wait for it to finish.
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
执行结果:
相关文章推荐
- Spark--02WordCount代码解析
- Hadoop WordCount代码
- 010-spark standalone模式Scala版本WordCount代码
- spark streaming 接收 kafka 数据java代码WordCount示例
- 第四篇:WordCount运行原理结合代码详细分析
- Hadoop的WordCount代码解析
- 【3-3】Wordcount代码编写
- 分别用Java、Scala、spark-shell开发wordcount程序及测试代码
- 查看jar里面到底什么鬼~(hadoop学习wordcount程序代码查看)
- 第一个storm代码-wordcount-本地模式
- 3-3 wordcount代码编写 mapper+reducer,wordcount
- STORM_0006_第二个storm_topology:WordCountTopology的代码与运行
- wordcount代码分析
- WordCount代码实现
- MapReduce WordCount 代码初探
- spark streaming 接收 kafka 数据java代码WordCount示例
- Hadoop入门—WordCount代码分析
- wordcount和sort代码
- 纯代码视角看分布式运算(上(wordcount)
- spark streaming 接收 kafka 数据java代码WordCount示例