Hadoop Example: Word Count with MapReduce
2013-04-07 19:53
Create a new Map/Reduce project in Eclipse.
1. Create a file MyMap.java:
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMap extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private final Text word = new Text(); // reuse one Text object per mapper

    @Override
    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        // Split the line on whitespace and emit (word, 1) for each token.
        StringTokenizer tokenizer = new StringTokenizer(line);
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}
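Before wiring this into a job, it may help to see what the tokenization alone produces. A minimal standalone check (plain Java, no Hadoop required; the class name TokenizerDemo is just for illustration). StringTokenizer splits on whitespace by default, so punctuation stays attached to its word, which is fine for this simple example:

import java.util.StringTokenizer;

public class TokenizerDemo {
    public static void main(String[] args) {
        StringTokenizer tokenizer = new StringTokenizer("Hello World 555 hahaha");
        while (tokenizer.hasMoreTokens()) {
            System.out.println(tokenizer.nextToken()); // one token per line
        }
    }
}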
2. Create a file MyReduce.java:
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the counts emitted for this word by all mappers (and combiners).
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
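To make the data flow concrete: between map and reduce, the framework sorts and groups all (word, 1) pairs by word, so the reducer is called once per distinct word with all of that word's counts. A plain-Java simulation of a single reduce call, using the values the word "Hello" gets from the sample input later in this article (ReduceDemo is illustrative only):

import java.util.Arrays;
import java.util.List;

public class ReduceDemo {
    public static void main(String[] args) {
        // After the shuffle, the reducer sees ("Hello", [1, 1]).
        List<Integer> values = Arrays.asList(1, 1);
        int sum = 0;
        for (int v : values) {
            sum += v;
        }
        System.out.println("Hello\t" + sum); // prints: Hello	2
    }
}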
3. Create a file MyDriver.java:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class MyDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "Hello Hadoop World");
        job.setJarByClass(MyDriver.class);

        // Map output and final output use the same (Text, IntWritable) types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(MyMap.class);
        // The reducer doubles as a combiner: summing is associative, so
        // partial sums can already be computed on the map side.
        job.setCombinerClass(MyReduce.class);
        job.setReducerClass(MyReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path("./input/555.txt"));
        // Note: the output path is a directory and must not exist yet.
        FileOutputFormat.setOutputPath(job, new Path("./input/out.txt"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
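One pitfall on reruns: FileOutputFormat refuses to start if the output path already exists, so a second run fails until you delete ./input/out.txt by hand. If you want the job to clean up automatically, a small guard can go into main() before the output path is set; a sketch, not part of the original code:

// Requires an extra import: org.apache.hadoop.fs.FileSystem
// Place in main() in place of the FileOutputFormat.setOutputPath(...) line.
Path outPath = new Path("./input/out.txt");
FileSystem fs = outPath.getFileSystem(conf);
if (fs.exists(outPath)) {
    fs.delete(outPath, true); // true = delete the directory recursively
}
FileOutputFormat.setOutputPath(job, outPath);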
Now for the moment of truth. First create a directory named input under the project root, and in it a file 555.txt with the following contents:
Hello World 555 hahaha
Hello World
Save it, then run MyDriver as a Java application.
A new directory named out.txt appears under input (the output path is a directory, not a file). Inside it is a file part-r-00000; since MapReduce sorts keys before the reduce phase, the words come out in sorted order, one word and its tab-separated count per line:
555 1
Hello 2
World 2
hahaha 1
And that's it.
One error you may hit while running:
org.apache.hadoop.fs.ChecksumException: Checksum error:
This is easy to fix: just delete the .crc checksum files under the project's input directory.
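The exception appears when a data file under input was edited after Hadoop wrote its hidden .crc sidecar, so the stored checksum no longer matches. If it keeps recurring, an alternative to deleting the .crc files by hand is to switch off verification on the local filesystem; a sketch, assuming the job runs locally:

// Requires an extra import: org.apache.hadoop.fs.FileSystem
// Place in main() right after creating the Configuration. LocalFileSystem
// extends ChecksumFileSystem, which honors setVerifyChecksum(false).
FileSystem localFs = FileSystem.getLocal(conf);
localFs.setVerifyChecksum(false);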
This article originally appeared on the Linux公社 site (www.linuxidc.com); original link: http://www.linuxidc.com/Linux/2012-12/75494.htm