My First Spark WordCount Program

2015-08-18 13:58
1. Create a Maven project

Contents of pom.xml:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.test</groupId>
    <artifactId>spark</artifactId>
    <version>0.0.1</version>
    <packaging>jar</packaging>

    <name>spark</name>
    <url>http://maven.apache.org</url>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <repositories>
        <repository>
            <id>localRepository</id>
            <url>file://${localRepository}</url>
        </repository>
        <repository>
            <id>codehausSnapshots</id>
            <url>https://repository.apache.org/content/repositories/releases</url>
        </repository>
        <repository>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
            <id>cloudera</id>
            <name>cloudera-repos</name>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos</url>
        </repository>
        <repository>
            <snapshots />
            <id>snapshots</id>
            <name>cloudera-repos</name>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos</url>
        </repository>
        <repository>
            <id>spring-release</id>
            <name>Spring Release Repository</name>
            <url>http://repo.spring.io/libs-release</url>
        </repository>
    </repositories>

    <!-- Inherited by child modules -->
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.1</version>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <dependencies>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>2.10.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.10</artifactId>
            <version>1.3.0-cdh5.4.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-mllib_2.10</artifactId>
            <version>1.3.0-cdh5.4.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0-cdh5.4.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0-cdh5.4.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
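Because the CDH-flavoured artifacts (the `-cdh5.4.0` versions) only exist in the Cloudera repository, it is worth checking that they actually resolve before building. One way, using the standard Maven dependency plugin (the filter value is just a groupId pattern):

mvn dependency:tree -Dincludes=org.apache.spark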

2. The code:

package com.test.spark;

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;

import scala.Tuple2;

public final class JavaWordCount {

    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) throws Exception {
        if (args.length < 1) {
            System.err.println("Usage: JavaWordCount <file>");
            System.exit(1);
        }

        SparkConf sparkConf = new SparkConf().setAppName("JavaWordCount");
        JavaSparkContext ctx = new JavaSparkContext(sparkConf);

        // Read the input file (an HDFS path when submitted to the cluster)
        // as an RDD of lines.
        JavaRDD<String> lines = ctx.textFile(args[0], 1);

        // Split each line on single spaces to get an RDD of words.
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            public Iterable<String> call(String s) {
                return Arrays.asList(SPACE.split(s));
            }
        });

        // Map each word to a (word, 1) pair.
        JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
            public Tuple2<String, Integer> call(String s) {
                return new Tuple2<String, Integer>(s, 1);
            }
        });

        // Sum the counts for each distinct word.
        JavaPairRDD<String, Integer> counts = ones.reduceByKey(new Function2<Integer, Integer, Integer>() {
            public Integer call(Integer i1, Integer i2) {
                return i1 + i2;
            }
        });

        // Collect the results to the driver and print them.
        List<Tuple2<String, Integer>> output = counts.collect();
        for (Tuple2<?, ?> tuple : output) {
            System.out.println(tuple._1() + ": " + tuple._2());
        }

        ctx.stop();
    }
}
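For a quick sanity check inside the IDE, without spark-submit or a cluster, the master can also be set in code. A minimal, self-contained sketch assuming the same Spark 1.3 Java API as above; the class name LocalSmokeTest and the local[2] thread count are just illustrative choices:

package com.test.spark;

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

// Hypothetical local smoke test: runs the Spark context in-process,
// so no cluster, HDFS, or spark-submit is needed.
public final class LocalSmokeTest {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("LocalSmokeTest")
                .setMaster("local[2]"); // two local worker threads
        JavaSparkContext ctx = new JavaSparkContext(conf);
        // Parallelize a tiny in-memory dataset and count its elements.
        long n = ctx.parallelize(Arrays.asList("hello", "spark", "hello")).count();
        System.out.println("elements: " + n); // prints: elements: 3
        ctx.stop();
    }
}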

3. Build with Maven

A. First, download the dependency jars:

mvn clean install

B. Package the project into a jar file:

mvn clean package
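Given <packaging>jar</packaging>, artifactId spark, and version 0.0.1 in the pom above, Maven's standard naming convention puts the build output at:

target/spark-0.0.1.jar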

4. Upload the jar to the Linux host and run it with Spark

The last argument is the path of the file whose words will be counted; it must be uploaded to HDFS first.

spark-submit --master local --class com.test.spark.JavaWordCount spark-0.0.1.jar /user/cloudera/caojian/oozie_coordinator/1--FourteenJob/coordinator.xml
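For an end-to-end check, a small input file can be pushed to HDFS and counted the same way. A sketch; input.txt and the HDFS path /user/cloudera/input.txt are illustrative names, not from the original setup:

echo "hello spark hello world" > input.txt
hdfs dfs -put input.txt /user/cloudera/input.txt
spark-submit --master local --class com.test.spark.JavaWordCount spark-0.0.1.jar /user/cloudera/input.txt

Given the println format in the code, the output contains lines like:

hello: 2
spark: 1
world: 1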