本地模式使用 JAVA/SCALA 开发 Spark SQL DataFrame
2016-03-14 21:34
651 查看
原文件:
{"name":"Michael"}
{"name":"Andy", "age":30}
{"name":"Justin", "age":19}
java
package com.dt.sparkApps.sql;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;
/**
 * Demonstrates basic Spark SQL DataFrame operations (Spark 1.x API) in
 * local mode: loading a JSON file, projecting columns, filtering rows,
 * and aggregating with groupBy/count.
 */
public class DataFrameOps {
    public static void main(String[] args) {
        // "local" master runs everything in-process; no cluster required.
        SparkConf conf = new SparkConf().setAppName("DataFrameOps").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        SQLContext sqlContext = new SQLContext(sc);
        // Cluster variant kept for reference:
        // DataFrame df = sqlContext.read().json("hdfs://master:9000/library/people.json");
        DataFrame df = sqlContext.read().json("G://IMFBigDataSpark2016//tesdata//people.json");
        df.show();          // dump the rows
        df.printSchema();   // show the schema inferred from the JSON
        df.select("name").show();
        // SELECT name, age + 10 FROM table
        df.select(df.col("name"), df.col("age").plus(10)).show();
        // SELECT * FROM table WHERE age > 10
        df.filter(df.col("age").gt(10)).show();
        // SELECT age, COUNT(1) FROM table GROUP BY age
        df.groupBy(df.col("age")).count().show();
        // Fix: release resources — the original never stopped the context.
        sc.stop();
    }
}
结果
scala
package com.dt.spark.sql
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
/**
 * Demonstrates basic Spark SQL DataFrame operations (Spark 1.x API) in
 * local mode: loading a JSON file, projecting columns, filtering rows,
 * and aggregating with groupBy/count.
 */
object DataFrameOps {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf() // create the SparkConf object
    // Application name shown in the Spark monitoring UI.
    conf.setAppName("Wow,My First Spark DataFrame App!")
    // Run locally; no Spark cluster installation is required.
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    // Cluster variant kept for reference:
    // val df = sqlContext.read.json("hdfs://master:9000/library/people.json")
    val df = sqlContext.read.json("G://IMFBigDataSpark2016//tesdata//people.json")
    df.show()          // dump the rows
    df.printSchema()   // show the schema inferred from the JSON
    df.select("name").show()
    // SELECT name, age + 10 FROM table
    df.select(df("name"), df("age") + 10).show()
    // SELECT * FROM table WHERE age > 10
    df.filter(df("age") > 10).show()
    // SELECT age, COUNT(1) FROM table GROUP BY age
    // (added for parity with the Java version, which performs this step)
    df.groupBy(df("age")).count().show()
    // Fix: release resources — the original never stopped the SparkContext.
    sc.stop()
  }
}
{"name":"Michael"}
{"name":"Andy", "age":30}
{"name":"Justin", "age":19}
java
package com.dt.sparkApps.sql;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;
/**
 * Local-mode walk-through of the Spark 1.x DataFrame API: read a JSON
 * source, inspect it, then project, filter and aggregate its rows.
 */
public class DataFrameOps {
    public static void main(String[] args) {
        // Configure a single-process local run; no cluster needed.
        SparkConf sparkConf = new SparkConf();
        sparkConf.setAppName("DataFrameOps");
        sparkConf.setMaster("local");
        JavaSparkContext javaSparkContext = new JavaSparkContext(sparkConf);
        SQLContext sqlContext = new SQLContext(javaSparkContext);
        // Cluster variant kept for reference:
        // DataFrame people = sqlContext.read().json("hdfs://master:9000/library/people.json");
        DataFrame people = sqlContext.read().json("G://IMFBigDataSpark2016//tesdata//people.json");
        people.show();                // dump the rows
        people.printSchema();         // schema inferred from the JSON
        people.select("name").show(); // SELECT name FROM table
        // SELECT name, age + 10 FROM table
        people.select(people.col("name"), people.col("age").plus(10)).show();
        // SELECT * FROM table WHERE age > 10
        people.filter(people.col("age").gt(10)).show();
        // SELECT age, COUNT(1) FROM table GROUP BY age
        people.groupBy(people.col("age")).count().show();
    }
}
结果
scala
package com.dt.spark.sql
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
/**
 * Local-mode walk-through of the Spark 1.x DataFrame API: read a JSON
 * source, inspect it, then project and filter its rows.
 */
object DataFrameOps {
  def main(args: Array[String]) {
    // The application name appears in the Spark monitoring UI; the
    // "local" master runs in-process, so no cluster is required.
    val sparkConf = new SparkConf()
      .setAppName("Wow,My First Spark DataFrame App!")
      .setMaster("local")
    val sc = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sc)
    // Cluster variant kept for reference:
    // val people = sqlContext.read.json("hdfs://master:9000/library/people.json")
    val people = sqlContext.read.json("G://IMFBigDataSpark2016//tesdata//people.json")
    people.show()                // dump the rows
    people.printSchema()         // schema inferred from the JSON
    people.select("name").show() // SELECT name FROM table
    // SELECT name, age + 10 FROM table
    people.select(people("name"), people("age") + 10).show()
    // SELECT * FROM table WHERE age > 10
    people.filter(people("age") > 10).show()
  }
}
相关文章推荐
- Eclipse中常用快捷键
- Collection,List,Set和Map用法和区别
- SpringMVC——注解的使用与结果跳转方式
- 整理spring零配置(Annotation)
- spring-hibernate修改数据报错
- Java内部类详解
- java知识盲区
- Java集合类
- android 在eclipse中使用RecyclerView
- 二叉树java版
- 静态内部类,成员内部类,外部类之间的一些知识总结
- eclipse中搭建svn开发管理环境
- SpringMVC——hello SpringMVC
- java实现走迷宫
- java path classpath JAVA_HOME
- Java实现发送邮件功能(可带附件)
- java设计模式之建造者模式
- Java —内部类
- 修改 JVM 输出数组
- struts2上传下载文件