承接安装系列hadoop,hbase环境
//hive 安装一个节点
1.上传tar包
2.解压
tar -zxvf apache-hive-1.2.1-bin.tar.gz -C /itcast/
mv /itcast/apache-hive-1.2.1-bin /itcast/hive-1.2.1   #解压后的目录名是apache-hive-1.2.1-bin,需重命名为后文统一使用的hive-1.2.1
3.mkdir /itcast/hive-1.2.1/logs
4./itcast/hive-1.2.1/conf/hive-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hive.querylog.location</name>
<value>/itcast/hive-1.2.1/logs</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://172.17.3.192:3306/hive?createDatabaseIfNotExist=true</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>username to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>1111</value>
<description>password to use against metastore database</description>
</property>
<property>
<name>hive.aux.jars.path</name>
<value>
file:///itcast/hive-1.2.1/lib/hive-hbase-handler-1.2.1.jar,file:///itcast/hive-1.2.1/lib/hbase-client-1.1.4.jar,file:///itcast/hive-1.2.1/lib/hbase-common-1.1.4-tests.jar,file:///itcast/hive-1.2.1/lib/hbase-common-1.1.4.jar,file:///itcast/hive-1.2.1/lib/hbase-protocol-1.1.4.jar,file:///itcast/hive-1.2.1/lib/hbase-server-1.1.4.jar,file:///itcast/hive-1.2.1/lib/htrace-core-3.1.0-incubating.jar,file:///itcast/hive-1.2.1/lib/guava-14.0.1.jar,file:///itcast/hive-1.2.1/lib/zookeeper-3.4.6.jar</value>
</property>
<property>
<name>hive.zookeeper.quorum</name>
<value>centos1,centos2,centos3</value>
</property>
</configuration>
5.
先删除 $HIVE_HOME/lib 下已有的同名jar包,再从 $HBASE_HOME/lib 拷贝以下jar包(注意:下面示例为CDH版本的文件名,应与hive-site.xml中hive.aux.jars.path配置的jar版本保持一致,请按实际HBase版本调整):
cp $HBASE_HOME/lib/hbase-client-1.0.0-cdh5.5.1.jar $HIVE_HOME/lib
cp $HBASE_HOME/lib/hbase-common-1.0.0-cdh5.5.1-tests.jar $HIVE_HOME/lib
cp $HBASE_HOME/lib/hbase-common-1.0.0-cdh5.5.1.jar $HIVE_HOME/lib
cp $HBASE_HOME/lib/hbase-protocol-1.0.0-cdh5.5.1.jar $HIVE_HOME/lib
cp $HBASE_HOME/lib/htrace-core-3.2.0-incubating.jar $HIVE_HOME/lib
cp $HBASE_HOME/lib/zookeeper-3.4.5-cdh5.5.1.jar $HIVE_HOME/lib
安装hive和mysql完成后,将mysql的连接jar包(mysql-connector-java)拷贝到$HIVE_HOME/lib目录下
如果出现没有权限的问题,在mysql授权(在安装mysql的机器上执行)
mysql -uroot -p
#(执行下面的语句 *.*:所有库下的所有表 %:任何IP地址或主机都可以连接)
-- Grant remote access so the Hive metastore can reach MySQL from other hosts.
-- *.* = every table in every database; '%' = connections from any host/IP.
-- NOTE(review): GRANT ... IDENTIFIED BY is MySQL 5.x syntax; on MySQL 8+
-- the user must be created first with CREATE USER, then granted.
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '1111' WITH GRANT OPTION;
FLUSH PRIVILEGES;
6. Jline包版本不一致的问题,需要拷贝hive的lib目录中jline-2.12.jar的jar包替换掉hadoop中的
/itcast/hadoop-2.6.0/share/hadoop/yarn/lib/jline-0.9.94.jar
7.配置hive-config.sh
export JAVA_HOME=/usr/java/jdk1.7.0_45
export HIVE_HOME=/itcast/hive-1.2.1
export HADOOP_HOME=/itcast/hadoop-2.6.0
8.配置hive-env.sh
HADOOP_HOME=/itcast/hadoop-2.6.0
export HIVE_CONF_DIR=/itcast/hive-1.2.1/conf
9.配置hive-log4j.properties
hive.log.dir=/itcast/hive-1.2.1/logs
10.mysql驱动放到hive/lib下
启动hive服务
hive --service hiveserver2 &
启动hive metastore 服务
hive --service metastore &
测试单节点Hbase的连接:
hive -hiveconf hbase.master=centos1:60000
-- Example statement to run against Hive to verify the connection works end-to-end.
-- CASCADE drops the database even if it still contains tables.
DROP DATABASE IF EXISTS mondrian CASCADE;
测试集群Hbase的连接
hive -hiveconf hbase.zookeeper.quorum=centos1,centos2,centos3
登录mysql中hive库:tbls表存储hive创建的表信息,columns_v2表存储表的列属性,sds表存储表数据所在的hdfs目录
建表(默认是内部表)
-- Managed (internal) table: DROP TABLE also removes the data on HDFS.
-- Field delimiter fixed to a tab character '\t'; the original 't' would
-- split each line on the literal letter t, corrupting every column.
create table trade_detail(id bigint, account string, income double, expenses double, time string) row format delimited fields terminated by '\t';
建分区表
-- Partitioned table: data is stored in one HDFS subdirectory per logdate value,
-- which keeps queries on a single date from scanning the whole table.
-- Field delimiter fixed to a tab '\t' (original 't' split on the letter t).
create table td_part(id bigint, account string, income double, expenses double, time string) partitioned by (logdate string) row format delimited fields terminated by '\t';
建外部表
-- External table: Hive only tracks the metadata; dropping the table leaves
-- the files at /td_ext on HDFS untouched.
-- Field delimiter fixed to a tab '\t' (original 't' split on the letter t).
create external table td_ext(id bigint, account string, income double, expenses double, time string) row format delimited fields terminated by '\t' location '/td_ext';
创建分区表
普通表和分区表区别:有大量数据增加的需要建分区表
-- Partitioned table keyed by publication date; used by the LOAD examples below.
-- Field delimiter fixed to a tab '\t' (original 't' split on the letter t).
create table book (id bigint, name string) partitioned by (pubdate string) row format delimited fields terminated by '\t';
分区表加载数据
-- Load local files into specific partitions; OVERWRITE replaces any existing
-- data in the target partition, plain INTO appends.
load data local inpath './book.txt' overwrite into table book partition (pubdate='2010-08-22');
load data local inpath '/root/data.am' into table beauty partition (nation="USA");
-- Per-nation average. Table name unified to 'beauty' to match the LOAD above
-- (the original mixed 'beauty' and 'beauties'); the aggregate is aliased so
-- ORDER BY works on Hive versions that reject ordering by a raw aggregate.
select nation, avg(size) as avg_size from beauty group by nation order by avg_size;
本地配置HADOOP_HOME、PATH hadoop-common-2.2.0-bin-master