spark-1.4配置
2015-07-26 17:33
281 查看
spark-defaults.conf:
spark.master                     spark://hadoop:7077
spark.eventLog.enabled           true
spark.eventLog.dir               hdfs://hadoop:9000/spark
spark.serializer                 org.apache.spark.serializer.KryoSerializer
spark.driver.cores               2
spark.driver.memory              512m
spark.executor.memory            1g
spark.shuffle.consolidateFiles   true
spark.sql.shuffle.partitions     100
spark.driver.userClassPathFirst  true
spark.streaming.blockInterval    100
spark.cleaner.ttl                90000
spark.yarn.historyServer.address http://hadoop:18088
spark.history.fs.logDirectory    hdfs://hadoop:9000/spark/log
spark.driver.extraLibraryPath    $HADOOP_HOME/lib/native
spark.executor.extraLibraryPath  $HADOOP_HOME/lib/native
spark.tachyonStore.url           tachyon://hadoop:19998
spark.tachyonStore.baseURL       /home/hadoop/data/tachyon
spark-env.sh:
export SCALA_HOME=/home/hadoop/scala
export SPARK_MASTER_IP=hadoop
export SPARK_WORKER_MEMORY=3G
export JAVA_HOME=/home/hadoop/jdk
export SPARK_EXECUTOR_INSTANCES=6
export SPARK_WORKER_CORES=3
export SPARK_EXECUTOR_CORES=1
export SPARK_WORKER_INSTANCES=3
export SPARK_DRIVER_MEMORY=1G
# export SPARK_CLASSPATH=$TACHYON_HOME/client/target/tachyon-client-0.6.4-jar-with-dependencies.jar:$SPARK_CLASSPATH
export SPARK_CLASSPATH=/home/hadoop/spark/lib/jblas-1.2.4.jar:$SPARK_CLASSPATH
hive-site.xml:
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://127.0.0.1:3306/hive?createDatabaseIfNotExist=true</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>Username to use against metastore database</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>root</value>
    <description>password to use against metastore database</description>
  </property>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://127.0.0.1:9083</value>
    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
  </property>
  <property>
    <name>hive.server2.thrift.min.worker.threads</name>
    <value>3</value>
  </property>
  <property>
    <name>hive.server2.thrift.max.worker.threads</name>
    <value>20</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>10000</value>
  </property>
  <property>
    <name>hive.server2.thrift.bind.host</name>
    <value>hadoop</value>
  </property>
</configuration>
core-site.xml:
<configuration>
  <property>
    <name>fs.tachyon.impl</name>
    <value>tachyon.hadoop.TFS</value>
  </property>
</configuration>
mvn -Pyarn -Phadoop-2.6 -Dhadoop.version=2.6.0 -Phive -Phive-thriftserver -Dscala-2.11 -Pspark-ganglia-lgpl -DskipTests clean package
more:
http://spark.apache.org/docs/latest/configuration.html
相关文章推荐
- Spark随谈——开发指南(译)
- Spark,一种快速数据分析替代方案
- eclipse 开发 spark Streaming wordCount
- Spark初探
- Spark Streaming初探
- 搭建hadoop/spark集群环境
- 整合Kafka到Spark Streaming——代码示例和挑战
- Spark 性能相关参数配置详解-任务调度篇
- 基于spark1.3.1的spark-sql实战-01
- 基于spark1.3.1的spark-sql实战-02
- 使用openfire,spark,fastpath webchat搭建在线咨询服务详细图文解说
- Spark源码分析(1) 从WordCount示例看Spark延迟计算原理
- spark自带示例一
- 在Tachyon上面运行Spark
- Ubuntu12.04(64bit)上部署编译运行Openfire+Spark环境
- ubuntu装spark openfire
- 开始spark之旅
- spark的几点备忘
- Scala中下划线(_)的用法
- Spark学习资料