Reading a SequenceFile with Hadoop MapReduce
2015-06-03 10:08
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.mahout.fpm.pfpgrowth.convertors.string.TopKStringPatterns;

public class ReadSeqFile {

    String uri = "/home/hadoop/fpdir/output/part-r-00000";
    private static SequenceFile.Reader reader = null;
    private static Configuration conf = new Configuration();

    public static class ReadFileMapper extends
            Mapper<LongWritable, Text, Text, Text> {

        @Override
        public void map(LongWritable key, Text value, Context context) {
            // Instantiate the key/value types recorded in the SequenceFile header.
            Text text = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            TopKStringPatterns k = (TopKStringPatterns) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            try {
                // Note: the records are read with the SequenceFile.Reader opened in main(),
                // not from the key/value pair passed to map().
                while (reader.next(text, k)) {
                    context.write(text, new Text(k.toString()));
                }
            } catch (IOException e1) {
                e1.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * @param args
     * @throws IOException
     * @throws InterruptedException
     * @throws ClassNotFoundException
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Job job = new Job(conf, "read seq file");
        job.setJarByClass(ReadSeqFile.class);
        job.setMapperClass(ReadFileMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // Configure the input/output paths and open the SequenceFile.Reader.
        Path path = new Path("/home/hadoop/fpdir/output/part-r-00000");
        FileSystem fs = FileSystem.get(conf);
        reader = new SequenceFile.Reader(fs, path, conf);
        FileInputFormat.addInputPath(job, path);
        FileOutputFormat.setOutputPath(job, new Path("/home/hadoop/fpdir/testReadSeq"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
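The mapper above ignores the key/value pair handed to map() and instead pulls every record from a static SequenceFile.Reader opened in main(). That only works when the map task runs in the same JVM as the driver (local/standalone mode); on a real cluster the static reader field would be null inside the task JVMs. A more conventional alternative is to let the framework deserialize the file with SequenceFileInputFormat, so each map() call receives one (Text, TopKStringPatterns) record directly. The sketch below is a minimal variant under that assumption; the class name and output path are placeholders, and the input path matches the one used above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.mahout.fpm.pfpgrowth.convertors.string.TopKStringPatterns;

public class ReadSeqFileDirect {

    // With SequenceFileInputFormat the map input types are the types stored
    // in the file: Text keys and TopKStringPatterns values.
    public static class SeqRecordMapper
            extends Mapper<Text, TopKStringPatterns, Text, Text> {
        @Override
        protected void map(Text key, TopKStringPatterns value, Context context)
                throws IOException, InterruptedException {
            // Each call already holds one deserialized record from the SequenceFile.
            context.write(key, new Text(value.toString()));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "read seq file (SequenceFileInputFormat)");
        job.setJarByClass(ReadSeqFileDirect.class);
        job.setMapperClass(SeqRecordMapper.class);
        job.setNumReduceTasks(0);                      // map-only job
        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Paths follow the example above; the output directory is hypothetical.
        FileInputFormat.addInputPath(job, new Path("/home/hadoop/fpdir/output/part-r-00000"));
        FileOutputFormat.setOutputPath(job, new Path("/home/hadoop/fpdir/testReadSeq2"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

In either variant the output directory must not already exist; FileOutputFormat refuses to overwrite an existing path.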