Hadoop AWS Word Count Example
2014-05-01 05:00
Launch a cluster in AWS with Elastic MapReduce (EMR).
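For reference, a cluster can also be created from the command line. The sketch below uses today's aws emr CLI rather than the EMR console or the old elastic-mapreduce Ruby client that a 2014 setup would have used; the key pair name, release label, and instance settings are placeholders to substitute with your own.

# Sketch of creating a small EMR cluster with the AWS CLI.
# KeyName, release label, and instance settings are placeholders.
aws emr create-cluster \
    --name "wordcount" \
    --applications Name=Hadoop \
    --release-label emr-5.36.0 \
    --use-default-roles \
    --ec2-attributes KeyName=my-key-pair \
    --instance-type m4.large \
    --instance-count 3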
Then log in to the master node and compile the following program:
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        // Reuse the same Writable instances across map() calls to avoid
        // allocating new objects for every record
        private final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split each input line on whitespace and emit (word, 1) per token
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum the 1s emitted by the mappers for this word
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "Word Count hadoop-0.20");
        // Set the job jar and the mapper/reducer classes
        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // Set the output key/value type classes
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Take the input and output directories from the command line
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
Set up the classpath, then compile, package, and run the job:
export CLASSPATH=$CLASSPATH:/home/hadoop/*:/home/hadoop/lib/*:'.'
javac WordCount.java
jar cvf WordCount.jar *.class
hadoop jar WordCount.jar WordCount s3://15-319-s13/book-dataset/pg_00 /output
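One thing to watch: Hadoop refuses to start a job whose output directory already exists, so clear /output before re-running.

# Remove a stale output directory before re-running the job
# (-rmr is the Hadoop 0.20-era form; newer releases use `hadoop fs -rm -r`)
hadoop fs -rmr /output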
After the job finishes successfully, the output directory lives in the Hadoop file system (HDFS), so you can inspect it like this:
hadoop fs -cat /output/part-r-00000 | less
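Each reducer writes one part-r-NNNNN file of tab-separated "word count" lines. Since an EMR cluster's HDFS disappears when the cluster is terminated, it is also worth copying the result off the cluster first; a sketch, where s3://my-bucket is a placeholder for a bucket you own:

# Copy the result out of the cluster's HDFS before terminating it
# (s3://my-bucket is a placeholder; older EMR AMIs used the s3n:// scheme)
hadoop fs -copyToLocal /output/part-r-00000 .
hadoop distcp /output s3://my-bucket/wordcount-output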
Main references:
http://kickstarthadoop.blogspot.com/2011/04/word-count-hadoop-map-reduce-example.html
http://kickstarthadoop.blogspot.com/2011/05/word-count-example-with-hadoop-020.html