Document Purpose
This article describes how to use Java code to access Phoenix 5.0 in a Kerberos-enabled environment.
- Content overview
  - Code writing
  - Verification
  - Summary
- Test environment
  - CM and CDH versions are both 7.1.1
  - Phoenix version is 5.0.0
  - Kerberos is enabled on the cluster
Code Writing
1.1 Cluster Environment
CM and CDH versions are both 7.1.1
Kerberos authentication is enabled on the cluster
Use Phoenix to check the existing HBase tables:
phoenix-sqlline
!tables
1.2 Writing the Java Code
Create a Maven project named pro-phoenix.
Add the Maven dependencies; the pom.xml file is as follows:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.apress.phoenix</groupId>
<artifactId>pro-phoenix</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<phoenix.version>5.0.0-HBase-2.0</phoenix.version>
<hadoop.version>2.7.1</hadoop.version>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix-core</artifactId>
<version>${phoenix.version}</version>
<exclusions>
<exclusion>
<artifactId>hadoop-auth</artifactId>
<groupId>org.apache.hadoop</groupId>
</exclusion>
<exclusion>
<artifactId>hadoop-hdfs</artifactId>
<groupId>org.apache.hadoop</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<artifactId>hadoop-auth</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<artifactId>hadoop-hdfs</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<artifactId>hadoop-common</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>${hadoop.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.0</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
</configuration>
</plugin>
</plugins>
</build>
</project>
Create the Kerberos principal hbase/_HOST@HADOOP.COM and export its keytab file:
kadmin.local
addprinc hbase/_HOST@HADOOP.COM
xst -norandkey -k hbasehost.keytab hbase/_HOST@HADOOP.COM
Create a conf directory under the project root and copy krb5.conf and the keytab file into it.
Copy the hbase-site.xml file into the resources directory of the Maven project.
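Before wiring these files into the Phoenix code, it can help to confirm that hbase-site.xml is actually visible on the classpath and that the Kerberos files are readable from the working directory. The following is a minimal, optional sketch (the class name ConfCheck is hypothetical; the file names match the ones assumed above):
import java.io.File;

public class ConfCheck {
    public static void main(String[] args) {
        // hbase-site.xml must be resolvable from the classpath (the resources directory)
        boolean onClasspath = ConfCheck.class.getClassLoader().getResource("hbase-site.xml") != null;
        System.out.println("hbase-site.xml on classpath: " + onClasspath);
        // krb5.conf and the keytab are read from the conf directory relative to the working directory
        System.out.println("krb5.conf readable: " + new File("conf/krb5.conf").canRead());
        System.out.println("keytab readable: " + new File("conf/hbasehost.keytab").canRead());
    }
}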
Create the test class PhoenixKrbTest.java in the project; the code is as follows:
package com.apress.phoenix.chapter3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
import org.apache.phoenix.jdbc.PhoenixConnection;
import java.io.IOException;
import java.sql.*;
public class PhoenixKrbTest {
private static Logger log = Logger.getLogger(PhoenixKrbTest.class);
/**
* Get a Phoenix connection
*/
public Connection getConn() {
// Connect to the Hadoop environment and perform Kerberos authentication
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "Kerberos");
// On Linux, /etc/krb5.conf is read by default; on Windows, C:/Windows/krb5.ini is read if no file is specified
/*if (System.getProperty("os.name").toLowerCase().startsWith("win")) {
System.setProperty("java.security.krb5.conf", "/conf/krb5.conf");
}*/
System.setProperty("java.security.krb5.conf", "conf/krb5.conf");
UserGroupInformation.setConfiguration(conf);
Connection conn = null;
try {
UserGroupInformation.loginUserFromKeytab("hbase/_HOST@HADOOP.COM", "conf/hbasehost.keytab");
Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
// In a Kerberos environment, the Phoenix JDBC URL takes the form jdbc:phoenix:zk:2181:/znode:principal:keytab
String url = "jdbc:phoenix:cdp2.hadoop.com:2181:/hbase:hbase/_HOST@HADOOP.COM:conf/hbasehost.keytab";
conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
} catch (ClassNotFoundException e) {
log.error(e.getMessage());
e.printStackTrace();
} catch (SQLException e1) {
log.error(e1.getMessage());
e1.printStackTrace();
} catch (IOException e2) {
log.error(e2.getMessage());
e2.printStackTrace();
}
return conn;
}
/**
* Operate on the table:
* use Phoenix to create a table, insert data, create an index, and query data
*/
public void operTable() {
Connection conn = getConn();
ResultSet rs = null;
Statement stmt = null;
try {
stmt = conn.createStatement();
stmt.execute("DROP TABLE if EXISTS testjdbc");
stmt.execute("CREATE TABLE testjdbc (id INTEGER NOT NULL PRIMARY KEY, content VARCHAR)");
// Create a secondary index
// stmt.execute("create index test_idx on testjdbc(content)");
// Insert data in a loop
for (int i = 1; i <= 100; i++) {
stmt.executeUpdate("upsert INTO testjdbc VALUES (" + i + ",'The num is " + i + "')");
}
conn.commit();
PreparedStatement statement = conn.prepareStatement("SELECT * FROM testjdbc limit 10");
rs = statement.executeQuery();
while (rs.next()) {
log.info("-------------The num is ---------------" + rs.getInt(1));
String id = rs.getString("id");
String content = rs.getString("content");
System.out.println("id = " + id + "; " + "content = " + content);
}
} catch (SQLException e) {
log.error(e.getMessage());
e.printStackTrace();
} finally {
closeRes(conn, stmt, rs);
}
}
/**
* Close resources
*
* @param conn
* @param statement
* @param rs
*/
public void closeRes(Connection conn, Statement statement, ResultSet rs) {
try {
if (conn != null) {
conn.close();
}
if (statement != null)
statement.close();
if (rs != null)
rs.close();
} catch (Exception e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
PhoenixKrbTest test = new PhoenixKrbTest();
test.operTable();
}
}
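The key piece in getConn() is the JDBC URL: in a Kerberos environment Phoenix expects the pattern jdbc:phoenix:&lt;zookeeper quorum&gt;:&lt;port&gt;:&lt;znode&gt;:&lt;principal&gt;:&lt;keytab&gt;. As an optional illustration (the helper class and method names are hypothetical; the values are the ones used in this example), the URL can be assembled from its parts like this:
public class PhoenixUrlBuilder {
    // Builds a Phoenix Kerberos JDBC URL from its individual parts
    static String buildUrl(String zkQuorum, int zkPort, String znode,
                           String principal, String keytabPath) {
        return String.format("jdbc:phoenix:%s:%d:%s:%s:%s",
                zkQuorum, zkPort, znode, principal, keytabPath);
    }

    public static void main(String[] args) {
        // Prints the same URL string used in PhoenixKrbTest.getConn()
        System.out.println(buildUrl("cdp2.hadoop.com", 2181, "/hbase",
                "hbase/_HOST@HADOOP.COM", "conf/hbasehost.keytab"));
    }
}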
1.3 Running the Sample Code
Run the code in IDEA; the execution result is as follows:
Verification
kinit hbase
klist
Verify in the Phoenix shell
Log in to Phoenix and check the table information:
phoenix-sqlline
!tables
As shown above, the testjdbc table has been created.
Check the data in the testjdbc table:
select * from testjdbc limit 10;
As shown above, the data matches what was queried in IDEA.
Verify in the HBase shell
Log in to the HBase shell and list the existing tables:
hbase shell
list
Check the data in the table:
scan 'TESTJDBC',{LIMIT => 10}
As shown above, the data matches what was queried in IDEA and in the Phoenix shell.
Summary
- In a Kerberos environment, the krb5.conf and keytab files are required, and the principal must be specified in the code.
- A Kerberos environment also requires the hbase-site.xml file; simply place it in the resources directory of the project.
- When accessing HBase, ZooKeeper is actually accessed first, so the ZooKeeper quorum address must be configured in the code (it is part of the JDBC URL).
- For Kerberos access, the JDK must have unlimited-strength cryptography enabled; it is enabled by default since JDK 8u161 (a quick runtime check is sketched below).
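A quick way to check the last point at runtime is to query the maximum allowed AES key length; a result of Integer.MAX_VALUE means the unlimited-strength policy is in effect. A minimal sketch (the class name JcePolicyCheck is hypothetical):
import javax.crypto.Cipher;

public class JcePolicyCheck {
    public static void main(String[] args) throws Exception {
        // Integer.MAX_VALUE indicates the unlimited-strength JCE policy is active,
        // which the AES-256 Kerberos encryption types require.
        int maxKeyLen = Cipher.getMaxAllowedKeyLength("AES");
        System.out.println("Max allowed AES key length: " + maxKeyLen);
        System.out.println("Unlimited strength enabled: " + (maxKeyLen == Integer.MAX_VALUE));
    }
}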