Container exited with a non-zero exit code 1 .Failing this attempt.. Failing the application.
2015-11-18 15:49
633 查看
简单的代码实现不进行详细的说明:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
/**
 * MapReduce job that loads tab-separated lines from HDFS into the HBase
 * table "test3" (column family "artitle", qualifiers "tile" and "tag").
 *
 * Flow: TextInputFormat -> MyMap (assigns a sequential row number per line)
 * -> MyReducer (builds a Put per line) -> TableOutputFormat.
 */
public class ImportToHbase {
    @SuppressWarnings("deprecation") // Put.add(byte[],byte[],byte[]) is deprecated in newer HBase
    public static void main(String[] args) throws Exception {
        final Configuration configuration = new Configuration();
        // ZooKeeper quorum the HBase client uses to locate the cluster.
        configuration.set("hbase.zookeeper.quorum", "master");
        // Destination table for TableOutputFormat.
        configuration.set(TableOutputFormat.OUTPUT_TABLE, "test3");
        // Raised DFS socket timeout (ms) to survive slow datanode responses.
        configuration.set("dfs.socket.timeout", "180000");
        // Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
        final Job job = Job.getInstance(configuration, ImportToHbase.class.getSimpleName());
        job.setJarByClass(ImportToHbase.class);
        job.setMapperClass(MyMap.class);
        job.setReducerClass(MyReducer.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TableOutputFormat.class);
        FileInputFormat.setInputPaths(job, "hdfs://master:9000/bbblll");
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Emits each input line unchanged, keyed by a per-task line counter.
     *
     * NOTE(review): the counter restarts at 0 in every map task, so with more
     * than one input split different tasks emit duplicate keys and later Puts
     * overwrite earlier rows. Consider deriving the key from the unique byte
     * offset ({@code key}) instead — TODO confirm desired row-key scheme.
     */
    static class MyMap extends Mapper<LongWritable, Text, IntWritable, Text> {
        private final Text v2 = new Text(); // reused to avoid per-record allocation
        private int i = 0;                  // per-task line counter used as row key

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The original wrapped this in catch(NumberFormatException), but
            // nothing here parses numbers, so that handler was dead code.
            i += 1;
            v2.set(value.toString());
            context.write(new IntWritable(i), v2);
        }
    }

    /**
     * Turns each tab-separated line into one Put: row key is the numeric map
     * key as text; field[1] -> artitle:tile, field[2] -> artitle:tag
     * (field[0] is intentionally unused).
     */
    static class MyReducer extends TableReducer<IntWritable, Text, NullWritable> {
        @Override
        protected void reduce(IntWritable k2, Iterable<Text> v2s, Context context)
                throws IOException, InterruptedException {
            for (Text text : v2s) {
                final String[] splited = text.toString().split("\t");
                // Guard against lines with fewer than 3 tab-separated fields;
                // the original indexed splited[1]/splited[2] unconditionally and
                // crashed the task with ArrayIndexOutOfBoundsException.
                if (splited.length < 3) {
                    System.err.println("skipping malformed line for key " + k2 + ": " + text);
                    continue;
                }
                final Put put = new Put(Bytes.toBytes(k2.toString()));
                put.add(Bytes.toBytes("artitle"), Bytes.toBytes("tile"), Bytes.toBytes(splited[1]));
                put.add(Bytes.toBytes("artitle"), Bytes.toBytes("tag"), Bytes.toBytes(splited[2]));
                context.write(NullWritable.get(), put);
            }
        }
    }
}
我主要说一下在运行的过程中出现的一些错误:(在Windows的Eclipse中——导入编程所需要的包)
这个错误是由于资源的分配出现的,所以我对于yarn-site.xml和mapred-site.xml进行了修改(这个地方在网上搜了好久,看懂了一些原理,但是没有具体的解决方法)——以下仅说出自己的理由:
mapred-site.xml:
yarn-site.xml:
在之前配置的前面加上vix.
但是在配置之后,由于在HBASE的表的问题上出现了一点小问题,经过对表进行改进,从而达到了目的
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
/**
 * MapReduce job that loads tab-separated lines from HDFS into the HBase
 * table "test3" (column family "artitle", qualifiers "tile" and "tag").
 *
 * Flow: TextInputFormat -> MyMap (assigns a sequential row number per line)
 * -> MyReducer (builds a Put per line) -> TableOutputFormat.
 */
public class ImportToHbase {
    @SuppressWarnings("deprecation") // Put.add(byte[],byte[],byte[]) is deprecated in newer HBase
    public static void main(String[] args) throws Exception {
        final Configuration configuration = new Configuration();
        // ZooKeeper quorum the HBase client uses to locate the cluster.
        configuration.set("hbase.zookeeper.quorum", "master");
        // Destination table for TableOutputFormat.
        configuration.set(TableOutputFormat.OUTPUT_TABLE, "test3");
        // Raised DFS socket timeout (ms) to survive slow datanode responses.
        configuration.set("dfs.socket.timeout", "180000");
        // Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
        final Job job = Job.getInstance(configuration, ImportToHbase.class.getSimpleName());
        job.setJarByClass(ImportToHbase.class);
        job.setMapperClass(MyMap.class);
        job.setReducerClass(MyReducer.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TableOutputFormat.class);
        FileInputFormat.setInputPaths(job, "hdfs://master:9000/bbblll");
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Emits each input line unchanged, keyed by a per-task line counter.
     *
     * NOTE(review): the counter restarts at 0 in every map task, so with more
     * than one input split different tasks emit duplicate keys and later Puts
     * overwrite earlier rows. Consider deriving the key from the unique byte
     * offset ({@code key}) instead — TODO confirm desired row-key scheme.
     */
    static class MyMap extends Mapper<LongWritable, Text, IntWritable, Text> {
        private final Text v2 = new Text(); // reused to avoid per-record allocation
        private int i = 0;                  // per-task line counter used as row key

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The original wrapped this in catch(NumberFormatException), but
            // nothing here parses numbers, so that handler was dead code.
            i += 1;
            v2.set(value.toString());
            context.write(new IntWritable(i), v2);
        }
    }

    /**
     * Turns each tab-separated line into one Put: row key is the numeric map
     * key as text; field[1] -> artitle:tile, field[2] -> artitle:tag
     * (field[0] is intentionally unused).
     */
    static class MyReducer extends TableReducer<IntWritable, Text, NullWritable> {
        @Override
        protected void reduce(IntWritable k2, Iterable<Text> v2s, Context context)
                throws IOException, InterruptedException {
            for (Text text : v2s) {
                final String[] splited = text.toString().split("\t");
                // Guard against lines with fewer than 3 tab-separated fields;
                // the original indexed splited[1]/splited[2] unconditionally and
                // crashed the task with ArrayIndexOutOfBoundsException.
                if (splited.length < 3) {
                    System.err.println("skipping malformed line for key " + k2 + ": " + text);
                    continue;
                }
                final Put put = new Put(Bytes.toBytes(k2.toString()));
                put.add(Bytes.toBytes("artitle"), Bytes.toBytes("tile"), Bytes.toBytes(splited[1]));
                put.add(Bytes.toBytes("artitle"), Bytes.toBytes("tag"), Bytes.toBytes(splited[2]));
                context.write(NullWritable.get(), put);
            }
        }
    }
}
我主要说一下在运行的过程中出现的一些错误:(在Windows的Eclipse中——导入编程所需要的包)
这个错误是由于资源的分配出现的,所以我对于yarn-site.xml和mapred-site.xml进行了修改(这个地方在网上搜了好久,看懂了一些原理,但是没有具体的解决方法)——以下仅说出自己的理由:
mapred-site.xml:
yarn-site.xml:
在之前配置的前面加上vix.
但是在配置之后,由于在HBASE的表的问题上出现了一点小问题,经过对表进行改进,从而达到了目的
相关文章推荐
- Android中ActivityManager的使用案例
- Android中的Handler的具体用法
- ios app 实现热更新(无需发新版本实现app添加新功能)
- iOS框架结构
- iOS开发 如何让navigationBar不半透明
- Android apk签名算法解析
- 1:android Studio的快捷键使用
- 微信开发总结小站
- iOS开发iOS8扩展App Extension 中文版
- android 使用ToggleButton实现开关效果
- android下拉菜单spinner的使用方法
- iOS控件-3级城市列表-plist版
- 创客路上,跟着嘻多猴快乐出发!
- Android LayoutInflater原理分析,带你一步步深入了解View(一)
- 非原生APP开发中可以用到的JQ插件
- Android数据存储方式
- iOS quartz 画双层空心圆,带百分比进度条
- Android中EditText的setError文字不显示的问题
- Android布局方式
- Mac iOS RSA 私钥和公钥的生成