
Spring for Hadoop Study Notes (3)

2015-05-23 13:53
The example of running a Hadoop job through Spring now works; the code is posted below.

CountMapper.java:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
* Created by hadoop on 15-5-21.
*/
public class CountMapper extends Mapper<Object, Text, Text, IntWritable> {

    private static final IntWritable one = new IntWritable(1);
    private Text word = new Text();

    @Override
    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        // Tokenize the input line and emit (word, 1) for every token.
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
            this.word.set(itr.nextToken());
            context.write(this.word, one);
        }
    }
}

CountReducer.java:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
* Created by hadoop on 15-5-21.
*/
public class CountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable result = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context) throws IOException, InterruptedException {
        // Sum the counts emitted for this word by the mappers (and the combiner).
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        this.result.set(sum);
        context.write(key, this.result);
    }
}

spring-config.xml:
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:hdp="http://www.springframework.org/schema/hadoop"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
                           http://www.springframework.org/schema/hadoop http://www.springframework.org/schema/hadoop/spring-hadoop.xsd
                           http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-4.1.xsd">

    <hdp:configuration>
        fs.defaultFS=${hd.fs}
        hadoop.tmp.dir=${java.io.tmpdir}
    </hdp:configuration>

    <hdp:job id="word-count"
             input-path="hdfs://localhost:9000/input/"
             output-path="hdfs://localhost:9000/output/"
             mapper="com.jsnu.chw.hadoop.wordcount.CountMapper"
             reducer="com.jsnu.chw.hadoop.wordcount.CountReducer"
             combiner="com.jsnu.chw.hadoop.wordcount.CountReducer"
             key="org.apache.hadoop.io.Text"
             value="org.apache.hadoop.io.IntWritable"/>

    <hdp:job-runner id="job-runner" job-ref="word-count" run-at-startup="true"/>

    <context:property-placeholder location="./hadoop.properties"/>
    <context:property-placeholder location="./log4j.properties"/>

</beans>
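For comparison, the <hdp:job> and <hdp:job-runner> elements above wire up roughly what a hand-written Hadoop driver would do. Below is a minimal sketch of that equivalent, just to show what the XML maps to; the class name WordCountDriver is made up for illustration, is assumed to sit in the same package as CountMapper and CountReducer, and is not part of the Spring setup above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {

    public static void main(String[] args) throws Exception {
        // Roughly what <hdp:configuration> does: point the client at the local HDFS.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");

        // Roughly what <hdp:job id="word-count" ...> declares.
        Job job = Job.getInstance(conf, "word-count");
        job.setJarByClass(WordCountDriver.class);
        job.setMapperClass(CountMapper.class);
        job.setCombinerClass(CountReducer.class);
        job.setReducerClass(CountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path("/input/"));
        FileOutputFormat.setOutputPath(job, new Path("/output/"));

        // Roughly what <hdp:job-runner run-at-startup="true"/> does: submit and wait.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}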


hadoop.properties (these values fill the ${hd.fs} and ${java.io.tmpdir} placeholders in <hdp:configuration> above):

hd.fs=hdfs://localhost:9000
java.io.tmpdir=/home/hadoop/tmp


main.java:

import org.apache.hadoop.mapreduce.Job;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

/**
* Created by hadoop on 15-5-21.
*/
public class main {

    public static void main(String[] args) {
        // Loading the context triggers the job-runner (run-at-startup="true"),
        // which submits the word-count job; the bean lookup just retrieves it.
        ApplicationContext ctx = new ClassPathXmlApplicationContext("classpath:spring-config.xml");
        Job job = (Job) ctx.getBean("word-count");
    }
}
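Because run-at-startup="true" is set on the job-runner, the job is submitted while the ApplicationContext is being built, so main only has to load the context; getBean merely returns the already-configured Job bean. If you would rather trigger the job from code, one option is to change run-at-startup to "false" and drive the Job bean yourself. A rough sketch under that assumption (ManualMain is just an illustrative name):

import org.apache.hadoop.mapreduce.Job;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

public class ManualMain {

    public static void main(String[] args) throws Exception {
        // With run-at-startup="false" the context only defines the job without submitting it.
        ApplicationContext ctx = new ClassPathXmlApplicationContext("classpath:spring-config.xml");
        Job job = (Job) ctx.getBean("word-count");

        // Submit the job through the standard Hadoop API and block until it finishes.
        boolean success = job.waitForCompletion(true);
        System.out.println("word-count finished, success = " + success);
    }
}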


WordCount itself is just the example that ships with Hadoop.


                                            