
WordCount and Sort code

2016-08-18 22:26
Program 1: WordCount.java
package com.wordcount.test;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

    // Mapper: splits each input line into tokens and emits (word, 1).
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // Combiner: pre-aggregates counts on the map side. It keeps the
    // (Text, IntWritable) key/value order, so it cannot double as the
    // reducer here, which swaps the order.
    public static class IntSumCombiner
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    // Reducer: sums the counts and swaps key and value, so the output is
    // (count, word). The follow-up Sort job sorts on this IntWritable key.
    public static class IntSumReducer
            extends Reducer<Text, IntWritable, IntWritable, Text> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(result, key);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: wordcount <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumCombiner.class);
        job.setReducerClass(IntSumReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(Text.class);

        // Write a binary SequenceFile so the Sort job can read back typed
        // (IntWritable, Text) records directly.
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        // All arguments except the last are input paths; the last is the output.
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
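One wrinkle worth noting: because the job writes its results through SequenceFileOutputFormat, the part files are binary rather than plain text. A minimal inspection sketch under that assumption (the class name DumpCounts and the example path are mine, not from the post) that reads the (IntWritable, Text) records back with the standard SequenceFile.Reader API:

package com.wordcount.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hypothetical helper, not part of the original post: dumps the
// (count, word) records that WordCount wrote as a SequenceFile.
public class DumpCounts {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]); // e.g. tmp/wordcounta/part-r-00000
        SequenceFile.Reader reader =
                new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        try {
            IntWritable count = new IntWritable();
            Text word = new Text();
            // next() fills the two Writables and returns false at end of file.
            while (reader.next(count, word)) {
                System.out.println(count.get() + "\t" + word);
            }
        } finally {
            reader.close();
        }
    }
}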

Program 2: Sort.java
package com.wordcount.test;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Sort {

    // Mapper: wraps each IntWritable count in a RevertKey so that the
    // shuffle sorts the records in descending order of count.
    public static class SimpleMapper extends Mapper<IntWritable, Text, RevertKey, Text> {
        @Override
        protected void map(IntWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            RevertKey newkey = new RevertKey(key);
            context.write(newkey, value);
        }
    }

    // Reducer: unwraps the key and swaps the pair back to (word, count).
    public static class SimpleReducer extends Reducer<RevertKey, Text, Text, IntWritable> {
        @Override
        protected void reduce(RevertKey key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            for (Text val : values) {
                context.write(val, key.getKey());
            }
        }
    }

    // A WritableComparable wrapper around IntWritable whose compareTo is
    // negated, turning the framework's ascending sort into a descending one.
    public static class RevertKey implements WritableComparable<RevertKey> {
        private IntWritable key;

        public RevertKey() {
            key = new IntWritable();
        }

        public RevertKey(IntWritable key) {
            // Copy the value: the framework reuses the IntWritable object
            // it passes to map(), so holding a reference would be fragile.
            this.key = new IntWritable(key.get());
        }

        public IntWritable getKey() {
            return key;
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            key.readFields(in);
        }

        @Override
        public void write(DataOutput out) throws IOException {
            key.write(out);
        }

        @Override
        public int compareTo(RevertKey o) {
            return -key.compareTo(o.getKey());
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: sort <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "sort");
        job.setJarByClass(Sort.class);
        job.setMapperClass(SimpleMapper.class);
        job.setReducerClass(SimpleReducer.class);

        job.setMapOutputKeyClass(RevertKey.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Read the typed (IntWritable, Text) records that WordCount wrote.
        job.setInputFormatClass(SequenceFileInputFormat.class);

        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
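The RevertKey wrapper is one way to get a descending sort. Another common approach, sketched below under the same Hadoop 2.x API and not taken from the post, keeps the plain IntWritable map-output key and registers a descending sort comparator on the job:

package com.wordcount.test;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Hypothetical alternative to RevertKey: sort plain IntWritable keys in
// descending order by negating the default comparator.
public class DescendingIntComparator extends WritableComparator {
    public DescendingIntComparator() {
        super(IntWritable.class, true); // true: create key instances for compare()
    }

    @Override
    @SuppressWarnings("rawtypes")
    public int compare(WritableComparable a, WritableComparable b) {
        return -super.compare(a, b);
    }
}

With this comparator the mapper could emit IntWritable keys directly, configured via job.setMapOutputKeyClass(IntWritable.class) and job.setSortComparatorClass(DescendingIntComparator.class), and the RevertKey class would not be needed.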

I ran these two programs on a remote desktop today.

Imported the jars: from hadoop-2.6.2's share/hadoop directory, the common, hdfs, mapreduce, and yarn jars, plus the jars in each module's lib directory (I imported them all to save trouble).

Run:
bin/hadoop jar /liyanan/Desktop/wordcounta.jar com.wordcount.test.WordCount /tmp/wordcount tmp/wordcounta

You can view the results with:
bin/hadoop fs -ls tmp/
bin/hadoop fs -cat tmp/

(Note that the WordCount output is a SequenceFile, so bin/hadoop fs -text decodes it, while fs -cat prints raw binary.) My Linux fundamentals are weak; I plan to catch up on them as soon as possible.

After WordCount finishes, debug Sort. Note that its input file is the part-r-00000 written by the previous job:

bin/hadoop jar /liyanan/Desktop/wordcounta.jar com.wordcount.test.Sort tmp/wordcounta/part-r-00000 tmp/sorta
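Submitting the two jobs by hand works, but they can also be chained in a single driver. A minimal sketch, not from the original post: it assumes the WordCount and Sort classes above are on the classpath in com.wordcount.test, and the class name WordCountThenSort and its three-argument path layout are mine.

package com.wordcount.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

// Hypothetical driver, not in the original post: runs WordCount and then
// Sort in sequence, feeding Sort the SequenceFile that WordCount wrote.
public class WordCountThenSort {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path input = new Path(args[0]);
        Path counts = new Path(args[1]);  // intermediate (count, word) SequenceFile
        Path sorted = new Path(args[2]);  // final descending-sorted output

        Job count = Job.getInstance(conf, "word count");
        count.setJarByClass(WordCount.class);
        count.setMapperClass(WordCount.TokenizerMapper.class);
        count.setCombinerClass(WordCount.IntSumCombiner.class);
        count.setReducerClass(WordCount.IntSumReducer.class);
        count.setMapOutputKeyClass(Text.class);
        count.setMapOutputValueClass(IntWritable.class);
        count.setOutputKeyClass(IntWritable.class);
        count.setOutputValueClass(Text.class);
        count.setOutputFormatClass(SequenceFileOutputFormat.class);
        FileInputFormat.addInputPath(count, input);
        FileOutputFormat.setOutputPath(count, counts);

        if (!count.waitForCompletion(true)) {
            System.exit(1); // stop if the first job failed
        }

        Job sort = Job.getInstance(conf, "sort");
        sort.setJarByClass(Sort.class);
        sort.setMapperClass(Sort.SimpleMapper.class);
        sort.setReducerClass(Sort.SimpleReducer.class);
        sort.setMapOutputKeyClass(Sort.RevertKey.class);
        sort.setMapOutputValueClass(Text.class);
        sort.setOutputKeyClass(Text.class);
        sort.setOutputValueClass(IntWritable.class);
        sort.setInputFormatClass(SequenceFileInputFormat.class);
        FileInputFormat.addInputPath(sort, counts);
        FileOutputFormat.setOutputPath(sort, sorted);

        System.exit(sort.waitForCompletion(true) ? 0 : 1);
    }
}

Run as bin/hadoop jar wordcounta.jar com.wordcount.test.WordCountThenSort <in> <counts> <out>; this should produce the same sorted output in one submission.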
Tags: mapreduce sort hadoop java