MapReduce程序打jar要注意的事项
2014-04-13 23:11
211 查看
package cmd; import java.io.IOException; import java.net.URI; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Mapper.Context; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; public class WordCountApp extends Configured implements Tool{ public static String FILE_PATH=""; public static String OUT_PATH=""; public int run(String[] args) throws Exception { FILE_PATH = args[0]; OUT_PATH = args[1]; Job job = new Job(new Configuration(), WordCountApp.class.getSimpleName()); job.setJarByClass(WordCountApp.class); final Configuration conf = new Configuration(); final FileSystem fileSystem = FileSystem.get(new URI(OUT_PATH), conf); if(fileSystem.exists(new Path(OUT_PATH))){ fileSystem.delete(new Path(OUT_PATH), true); } //1.1从哪里读取数据 FileInputFormat.setInputPaths(job, FILE_PATH); //把每一行数据解析成一个键值对 job.setInputFormatClass(TextInputFormat.class); //1.2自定义函数 job.setMapperClass(MyMapReduce.class); job.setMapOutputKeyClass(Text.class); job.setPartitionerClass(HashPartitioner.class); //1.3分区 job.setPartitionerClass(HashPartitioner.class); job.setNumReduceTasks(1); //1.4排序,分组 //1.5归约 //2.1框架自己完成 //2.2自定义reduce函数 job.setReducerClass(MyReduce.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(LongWritable.class); //2.3写入hdfs中去 FileOutputFormat.setOutputPath(job, new Path(OUT_PATH)); 
job.setOutputFormatClass(TextOutputFormat.class); job.waitForCompletion(true); return 0; } public static void main(String[] args) throws Exception { ToolRunner.run(new WordCountApp(), args); } static class MyMapReduce extends Mapper<LongWritable, Text, Text, LongWritable>{ protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException { String line = value.toString(); String[] splits = line.split("\t"); for(String word:splits){ context.write(new Text(word),new LongWritable(1)); } } } static class MyReduce extends Reducer<Text, LongWritable, Text, LongWritable>{ protected void reduce(Text key, Iterable<LongWritable> values, Context context ) throws IOException, InterruptedException { long sum = 0L; for(LongWritable value: values) { sum+=value.get(); } context.write(key, new LongWritable(sum)); } } }
如上代码所示
1,继承Configured类
2,实现Tool接口
3,重写run方法并把输入,输出路径作为参数(数组)给传进来
4,job.setJarByClass(WordCountApp.class);这句一定要写,否则会报错
cmd
hadoop jar jar包名.后缀 输入路径 输出路径
命令行示例如下:
[root@simon Downloads]# hadoop jar jar.jar hdfs://simon:9000/hello hdfs://simon:9000/out
然后回车就OK了
相关文章推荐
- mapreduce程序编写注意事项
- 关于在hadoop的mapreduce程序中使用GroupingComparator组件的注意事项
- hadoop程序在MyEclipse中打jar时要注意的事项
- x64位windows上程序开发的注意事项
- 在程序中用new ClassPathXmlApplicationContext()获取Spring的上下文环境注意事项
- 微信对接注意事项-jar设置
- 用Eclipse跑Hadoop程序的注意事项
- 32位程序访问64位 system32文件夹时注意事项
- ABAP初学者程序规范注意事项1
- 提高ASP.NET WEB程序的几点注意事项
- 第一次做Java程序注意事项
- 【转】在64位机上跑32位程序注意事项
- MFC程序编写注意事项
- MapReduce程序打成jar包在远程服务器运行
- Matlab中计算量较大的程序运行管理注意事项
- 编写跨平台Java程序注意事项---摘抄
- 实现Java程序跨平台运行十二个注意事项
- iOS之程序上架注意事项
- PHP网站安装程序制作的原理、步骤、注意事项和示例代码
- 引用jar包注意事项