使用MapReduce开发WordCount应用程序
2017-11-05 10:40
337 查看
package com.hadoop.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * WordCount application implemented with the Hadoop MapReduce API.
 *
 * <p>Usage: {@code hadoop jar <jar> com.hadoop.mapreduce.WordCountApp <input> <output>}
 */
public class WordCountApp {

    /**
     * Map phase: reads each line of the input file and emits a (word, 1) pair
     * per whitespace-separated token.
     * Mapper&lt;KEYIN, VALUEIN, KEYOUT, VALUEOUT&gt;
     */
    public static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

        // Reusable output objects — avoids allocating a new Writable per record
        // (standard Hadoop idiom; the framework serializes on write()).
        private final LongWritable one = new LongWritable(1);
        private final Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Each incoming value is one line of the input file.
            String line = value.toString();
            // Split on runs of whitespace: split(" ") would produce empty tokens
            // for consecutive/leading spaces and count them as words.
            for (String token : line.trim().split("\\s+")) {
                if (!token.isEmpty()) {
                    word.set(token);
                    // Emit the map result through the context.
                    context.write(word, one);
                }
            }
        }
    }

    /**
     * Reduce phase: sums the per-word counts produced by the mappers.
     */
    public static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

        // Reused across reduce() calls to avoid per-key allocation.
        private final LongWritable result = new LongWritable();

        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            long sum = 0;
            // Total number of occurrences of this key.
            for (LongWritable value : values) {
                sum += value.get();
            }
            result.set(sum);
            // Emit the final count for this word.
            context.write(key, result);
        }
    }

    /**
     * Driver: configures and submits the MapReduce job.
     *
     * @param args args[0] = HDFS input path, args[1] = HDFS output path
     * @throws Exception if job setup or submission fails
     */
    public static void main(String[] args) throws Exception {
        // Validate arguments up front instead of failing later with a bare
        // ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: WordCountApp <input path> <output path>");
            System.exit(2);
        }

        Configuration configuration = new Configuration();

        // Delete the output directory if it already exists — otherwise the job
        // fails with FileAlreadyExistsException.
        Path outputPath = new Path(args[1]);
        FileSystem fileSystem = FileSystem.get(configuration);
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true);
            System.out.println("Output path already existed and has been deleted");
        }

        Job job = Job.getInstance(configuration, "wordcount");
        // Lets Hadoop locate the jar containing this class.
        job.setJarByClass(WordCountApp.class);

        // Input path for the job.
        FileInputFormat.setInputPaths(job, new Path(args[0]));

        // Map-side configuration.
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Reduce-side configuration.
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Output path for the job (same path that was cleaned above).
        FileOutputFormat.setOutputPath(job, outputPath);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /*
     * Build:  mvn clean package -DskipTests
     * Upload: scp target/hadoop-train-1.0.jar hadoop@hadoop000:~/lib
     * Run:    hadoop jar /home/hadoop/lib/hadoop-train-1.0.jar com.hadoop.mapreduce.WordCountApp \
     *             hdfs://hadoop000:8020/hello.txt hdfs://hadoop000:8020/output/wc
     *
     * Script (wc_shell.sh):
     *   hadoop fs -rm -r /output/wc   # remove previous output first
     *   hadoop jar /home/hadoop/lib/hadoop-train-1.0.jar com.hadoop.mapreduce.WordCountApp \
     *       hdfs://hadoop000:8020/hello.txt hdfs://hadoop000:8020/output/wc
     * Make executable: chmod u+x wc_shell.sh
     */
}
相关文章推荐
- hadoop基础----hadoop实战(四)-----myeclipse开发MapReduce---myeclipse搭建hadoop开发环境并运行wordcount
- 知识学习——Hadoop MapReduce开发入门程序WordCount详解
- 使用python实现MapReduce的wordcount实例
- 使用Eclipse基于Maven使用Java开发WordCount程序项目
- hadoop学习(7)—— 使用yarn运行mapreduce一个简单的wordcount示例
- 在eclipse上搭建mapreduce开发环境及运行wordcount
- CDH5.0.0使用hue中的oozie编辑器创建一个wordcount的mapreduce job
- hadoop基础----hadoop实战(五)-----myeclipse开发MapReduce---WordCount例子---解析MapReduce的写法
- MapReduce基础开发之一词汇统计和排序(wordcount)
- windows10上使用Eclipse配置Hadoop开发环境详细步骤+WordCount示例
- CDH5.0.0使用hue中的oozie编辑器创建一个wordcount的mapreduce job
- CDH5.0.0使用hue中的oozie编辑器创建一个wordcount的mapreduce job
- hadoop学习之HDFS(2.1):linux下eclipse中配置hadoop-mapreduce开发环境并运行WordCount.java程序
- Spark实战----(1)使用Scala开发本地测试的Spark WordCount程序
- Spark学习(二):使用Spark开发wordcount程序
- 使用eclipse开发spark程序 wordcount 事例
- hbase和mapreduce开发 WordCount
- Windows 使用Eclipse配置连接hadoop,编译运行MapReduce --本地调试WordCount
- hadoop基础----hadoop实战(五)-----myeclipse开发MapReduce---WordCount例子---解析MapReduce的写法
- BEGINNING SHAREPOINT® 2013 DEVELOPMENT 第14章节--使用Office Services开发应用程序 WORD自动服务和新的PowerPoint自动服务