MapReduce 程序(1) - 求成绩平均数并且四舍五入,保留两位小数
2017-08-23 15:00
295 查看
之前在网上看了很多MapReduce程序计数的帖子,在这里跟大家分享一下自己编写的代码。
此文章适合初入门级别。
file1:
sunying 89
yangkun 95
wangjie 75
zhaode 85
xiaoming 90
file2:
sunying 85
yangkun 75
wangjie 79
zhaode 89
xiaoming 88
file3:
sunying 78
yangkun 90
wangjie 85
zhaode 80
xiaoming 93
package org.sunying.practice;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * MapReduce job that computes each student's average score across all input
 * files and formats it with two decimal places (half-up rounding via
 * {@code String.format}).
 *
 * Input records are "name&lt;whitespace&gt;score" lines; output records are
 * "name&lt;TAB&gt;average".
 */
public class AvgScore {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Allow input/output paths on the command line; fall back to the
        // original hard-coded defaults for backward compatibility.
        String inputDir = args.length > 0 ? args[0] : "D://hadoop-2.6.0//input//score";
        String outputDir = args.length > 1 ? args[1] : "D://hadoop-2.6.0//output";

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(AvgScore.class);

        // Mapper emits (name, score).
        job.setMapperClass(ASMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Reducer emits (name, formatted average).
        job.setReducerClass(ASReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(inputDir));

        // Delete a stale output directory before registering it: Hadoop
        // refuses to start a job whose output path already exists.
        Path outPath = new Path(outputDir);
        FileSystem fileSystem = outPath.getFileSystem(conf);
        if (fileSystem.exists(outPath)) {
            fileSystem.delete(outPath, true); // true = recursive delete
        }
        FileOutputFormat.setOutputPath(job, outPath);

        // Propagate job success/failure through the process exit code
        // (the original ignored the result of waitForCompletion).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /** Parses "name&lt;whitespace&gt;score" lines and emits (name, score). */
    public static class ASMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
        // Reuse writables across records to avoid per-record allocations
        // (standard Hadoop mapper idiom).
        private final Text name = new Text();
        private final LongWritable score = new LongWritable();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split on any whitespace so both tab- and space-separated
            // records are accepted (the original split only on "\t" and
            // crashed on the space-separated sample data).
            String line = value.toString().trim();
            if (line.isEmpty()) {
                return; // skip blank lines instead of throwing
            }
            String[] fields = line.split("\\s+");
            if (fields.length < 2) {
                return; // malformed record: no score column
            }
            name.set(fields[0]);
            score.set(Long.parseLong(fields[1]));
            context.write(name, score);
        }
    }

    /** Averages all scores for one student, formatted to two decimals. */
    public static class ASReducer extends Reducer<Text, LongWritable, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            long sum = 0;
            long n = 0;
            for (LongWritable v : values) {
                sum += v.get();
                n++;
            }
            // Divide by the actual number of observed scores instead of the
            // hard-coded 3.0 the original used, so the job stays correct
            // with any number of input files per student.
            double avg = (n == 0) ? 0.0 : (double) sum / n;
            context.write(key, new Text(String.format("%.2f", avg)));
        }
    }
}
此文章适合初入门级别。
file1:
sunying 89
yangkun 95
wangjie 75
zhaode 85
xiaoming 90
file2:
sunying 85
yangkun 75
wangjie 79
zhaode 89
xiaoming 88
file3:
sunying 78
yangkun 90
wangjie 85
zhaode 80
xiaoming 93
package org.sunying.practice;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * MapReduce job that computes each student's average score across all input
 * files and formats it with two decimal places (half-up rounding via
 * {@code String.format}).
 *
 * Input records are "name&lt;whitespace&gt;score" lines; output records are
 * "name&lt;TAB&gt;average".
 */
public class AvgScore {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Allow input/output paths on the command line; fall back to the
        // original hard-coded defaults for backward compatibility.
        String inputDir = args.length > 0 ? args[0] : "D://hadoop-2.6.0//input//score";
        String outputDir = args.length > 1 ? args[1] : "D://hadoop-2.6.0//output";

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(AvgScore.class);

        // Mapper emits (name, score).
        job.setMapperClass(ASMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Reducer emits (name, formatted average).
        job.setReducerClass(ASReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(inputDir));

        // Delete a stale output directory before registering it: Hadoop
        // refuses to start a job whose output path already exists.
        Path outPath = new Path(outputDir);
        FileSystem fileSystem = outPath.getFileSystem(conf);
        if (fileSystem.exists(outPath)) {
            fileSystem.delete(outPath, true); // true = recursive delete
        }
        FileOutputFormat.setOutputPath(job, outPath);

        // Propagate job success/failure through the process exit code
        // (the original ignored the result of waitForCompletion).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /** Parses "name&lt;whitespace&gt;score" lines and emits (name, score). */
    public static class ASMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
        // Reuse writables across records to avoid per-record allocations
        // (standard Hadoop mapper idiom).
        private final Text name = new Text();
        private final LongWritable score = new LongWritable();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split on any whitespace so both tab- and space-separated
            // records are accepted (the original split only on "\t" and
            // crashed on the space-separated sample data).
            String line = value.toString().trim();
            if (line.isEmpty()) {
                return; // skip blank lines instead of throwing
            }
            String[] fields = line.split("\\s+");
            if (fields.length < 2) {
                return; // malformed record: no score column
            }
            name.set(fields[0]);
            score.set(Long.parseLong(fields[1]));
            context.write(name, score);
        }
    }

    /** Averages all scores for one student, formatted to two decimals. */
    public static class ASReducer extends Reducer<Text, LongWritable, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            long sum = 0;
            long n = 0;
            for (LongWritable v : values) {
                sum += v.get();
                n++;
            }
            // Divide by the actual number of observed scores instead of the
            // hard-coded 3.0 the original used, so the job stays correct
            // with any number of input files per student.
            double avg = (n == 0) ? 0.0 : (double) sum / n;
            context.write(key, new Text(String.format("%.2f", avg)));
        }
    }
}
相关文章推荐
- php保留两位小数并且四舍五入 保留两位小数并且不四舍五入
- 对double数据类型的数据保留两位小数,并且进行四舍五入
- c#中的保留两位小数并且四舍五入
- PHP_保留两位小数并且四舍五入(可用于精度计算)_保留两位小数并且不四舍五入,
- PHP保留两位小数并且四舍五入及不四舍五入的方法
- PHP_保留两位小数并且四舍五入_保留两位小数并且不四舍五入
- PHP_保留两位小数并且四舍五入_保留两位小数并且不四舍五入
- PHP_保留两位小数并且四舍五入(可用于精度计算)_保留两位小数并且不四舍五入
- PHP保留两位小数并且四舍五入及不四舍五入的方法
- PHP_保留两位小数并且四舍五入_保留两位小数并且不四舍五入
- PHP_保留两位小数并且四舍五入_保留两位小数并且不四舍五入
- java实现四舍五入并且保留两位小数
- PHP_保留两位小数并且四舍五入_保留两位小数并且不四舍五入_取整
- C# 小数点后保留两位小数,四舍五入的函数及使用方法
- Java学习笔记---保留小数后两位不进行四舍五入
- 保留四舍五入后的两位小数
- Java:对double值进行四舍五入,保留两位小数的几种方法
- 保留一个实数的两位小数,从第三位实现四舍五入
- JS保留四舍五入两小数(解决保留两位小数输入1.999变成1.100的问题)
- 将浮点数保留两位小数并且向上舍入