Continuing the installation series (hadoop, hive):
Setting up Hive and Kylin
This builds on top of the existing hadoop, hbase, hive, and spark installation.
hive
Configure environment variables: add the following to both /etc/profile and ~/.bash_profile:
export HCAT_HOME=$HIVE_HOME/hcatalog
export HIVE_CONF=$HIVE_HOME/conf
export hive_dependency=/itcast/hive/conf:/itcast/hive/lib/*:/itcast/hive/hcatalog/share/hcatalog/hive-hcatalog-pig-adapter-1.1.0-cdh5.5.1.jar:/itcast/hive/hcatalog/share/hcatalog/hive-hcatalog-core-1.1.0-cdh5.5.1.jar:/itcast/hive/hcatalog/share/hcatalog/hive-hcatalog-server-extensions-1.1.0-cdh5.5.1.jar:/itcast/hive/hcatalog/share/hcatalog/hive-hcatalog-streaming-1.1.0-cdh5.5.1.jar:/itcast/hive/lib/hive-exec-1.1.0-cdh5.5.1.jar
export PATH=$PATH:$HIVE_HOME/bin:$HCAT_HOME/bin
export CLASSPATH=.:$HIVE_HOME/lib
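After editing, reload the profile so the new variables take effect in the current shell. A quick sanity check (assuming $HIVE_HOME was exported in the earlier hadoop/hive installments):
source /etc/profile
echo $hive_dependency    # should print the hcatalog/hive jar list above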
Add the following to hive-site.xml:
<property>
    <name>hive.hwi.war.file</name>
    <value>/itcast/hive/lib/hive-hwi-1.1.0-cdh5.5.1.jar</value>
</property>
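With the configuration in place, a minimal smoke test confirms Hive still starts cleanly:
hive -e "show databases;"    # should list at least the default database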
kylin
Configure environment variables in /etc/profile and ~/.bash_profile:
export KYLIN_HOME=/itcast/kylin
export PATH=$PATH:$KYLIN_HOME/bin
export CLASSPATH=.:$KYLIN_HOME/lib
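Reload the profile and confirm the Kylin scripts are now resolvable:
source /etc/profile
which kylin.sh    # should print /itcast/kylin/bin/kylin.sh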
In /itcast/kylin/tomcat/conf, edit catalina.properties:
shared.loader=/itcast/hive/lib/*.jar
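A stock Tomcat catalina.properties already contains an empty shared.loader= line; modify that line rather than adding a second one. To verify only a single definition remains:
grep '^shared.loader' /itcast/kylin/tomcat/conf/catalina.properties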
In /itcast/kylin/conf, edit kylin.properties:
kylin.rest.servers=centos1:7070,centos2:7070,centos3:7070
kylin.hbase.cluster.fs=hdfs://mycluster/apps/hbase/data
kylin.route.hive.enabled=true
kylin.route.hive.url=jdbc:hive2://centos1:10000
kylin.monitor.hive.jdbc.connection.url=jdbc:hive2://centos1:10000
kylin.monitor.ext.log.base.dir=/tmp/kylin_log1,/tmp/kylin_log2
kylin.monitor.query.log.parse.result.table=kylin_query_log
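The two jdbc:hive2 URLs above assume HiveServer2 is listening on centos1:10000. Before starting Kylin, you can confirm it is reachable with beeline (shipped with Hive):
beeline -u jdbc:hive2://centos1:10000 -e "show databases;"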
In the /itcast/kylin/conf directory, run the following command:
ln -s /itcast/hive/conf/hive-site.xml
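A quick check that the symlink resolves to the real file:
ls -l /itcast/kylin/conf/hive-site.xml    # should point to /itcast/hive/conf/hive-site.xml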
Comment out the following block in hadoop-env.sh, so the capacity-scheduler jars are no longer added to HADOOP_CLASSPATH:
# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
#  if [ "$HADOOP_CLASSPATH" ]; then
#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
#  else
#    export HADOOP_CLASSPATH=$f
#  fi
#done
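Since hadoop-env.sh is sourced on every hadoop invocation, the change takes effect immediately; the capacity-scheduler jars should no longer appear on the classpath:
hadoop classpath | tr ':' '\n' | grep capacity-scheduler    # should print nothing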
Check the deployment with the helper scripts:
check-env.sh
find-hbase-dependency.sh
find-hive-dependency.sh
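All three scripts live in $KYLIN_HOME/bin (already on the PATH from the step above), so they can be chained to stop at the first failure:
check-env.sh && find-hbase-dependency.sh && find-hive-dependency.sh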
Start Kylin:
kylin.sh start
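If startup succeeds, watch the log and then open the web UI on any node listed in kylin.rest.servers (Kylin's default login is ADMIN/KYLIN):
tail -f $KYLIN_HOME/logs/kylin.log
# then browse to http://centos1:7070/kylin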