The project recently had to cut development costs, so Docker is being squeezed for all it's worth: for now only one server is available. The plan: three ZooKeeper nodes, three Kafka brokers, and HBase and Hadoop running directly on the host rather than in Docker. Reference: https://www.cnblogs.com/idea360/p/12411859.html

Docker is already installed on the server. To speed up development we'll use docker-compose, so install that first:
```bash
sudo curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
```
Swap 1.24.1 for whichever release you want to use; each Compose file format version requires at least the following Docker Engine version:

| Compose file format | Docker Engine |
| --- | --- |
| 3.4 | 17.09.0+ |
| 3.3 | 17.06.0+ |
| 3.2 | 17.04.0+ |
| 3.1 | 1.13.1+ |
| 3.0 | 1.13.0+ |
| 2.3 | 17.06.0+ |
| 2.2 | 1.13.0+ |
| 2.1 | 1.12.0+ |
| 2.0 | 1.10.0+ |
| 1.0 | 1.9.1+ |

Next:
```bash
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
docker-compose --version
```
If that prints a version, the installation succeeded. Now write two compose files, one for ZooKeeper and one for Kafka. (Note: a volumes entry can be added to the ZooKeeper and Kafka services to map their data directories out to the host; I skipped it for ZooKeeper here, e.g. "/opt/kafka/kafka1/data/:/kafka".)
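If you do want that mapping, it is an ordinary volumes entry on the service; a minimal sketch using the path from the note above (with the leading slash it presumably needs):

```yaml
    volumes:
      # Persist the container's data directory on the host
      - /opt/kafka/kafka1/data/:/kafka
```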
The ZooKeeper file:

```yaml
version: '3.4'
services:
  zoo1:
    image: zookeeper:3.4.10
    restart: always
    hostname: zoo1
    container_name: zoo1
    ports:
      - 2184:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
  zoo2:
    image: zookeeper:3.4.10
    restart: always
    hostname: zoo2
    container_name: zoo2
    ports:
      - 2185:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888
  zoo3:
    image: zookeeper:3.4.10
    restart: always
    hostname: zoo3
    container_name: zoo3
    ports:
      - 2186:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888
```
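Once the ensemble is running (the docker-compose commands follow after both files), a quick sanity check is to ask each node for its role; zkServer.sh is on the PATH in the official image:

```bash
# Each node should answer with "Mode: leader" or "Mode: follower"
docker exec -it zoo1 zkServer.sh status
docker exec -it zoo2 zkServer.sh status
docker exec -it zoo3 zkServer.sh status
```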
And the Kafka file:

```yaml
version: '3.4'
services:
  kafka1:
    image: wurstmeister/kafka:2.11-0.11.0.3
    restart: unless-stopped
    container_name: kafka1
    ports:
      - "9093:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: 172.21.0.3                  ## change: host machine IP
      KAFKA_ADVERTISED_PORT: 9093                             ## change: host-mapped port
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.21.0.3:9093 ## address published to producers/consumers; change: host IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_DELETE_TOPIC_ENABLE: 'true'
    volumes:
      - "/home/cdata/data1/docker/kafka/kafka1/docker.sock:/var/run/docker.sock"
      - "/home/cdata/data1/docker/kafka/kafka1/data/:/kafka"
  kafka2:
    image: wurstmeister/kafka:2.11-0.11.0.3
    restart: unless-stopped
    container_name: kafka2
    ports:
      - "9094:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: 172.21.0.3                  ## change: host machine IP
      KAFKA_ADVERTISED_PORT: 9094                             ## change: host-mapped port
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.21.0.3:9094 ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_DELETE_TOPIC_ENABLE: 'true'
    volumes:
      - "/home/cdata/data1/docker/kafka/kafka2/docker.sock:/var/run/docker.sock"
      - "/home/cdata/data1/docker/kafka/kafka2/data/:/kafka"
  kafka3:
    image: wurstmeister/kafka:2.11-0.11.0.3
    restart: unless-stopped
    container_name: kafka3
    ports:
      - "9095:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: 172.21.0.3                  ## change: host machine IP
      KAFKA_ADVERTISED_PORT: 9095                             ## change: host-mapped port
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.21.0.3:9095 ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
      KAFKA_DELETE_TOPIC_ENABLE: 'true'
    volumes:
      - "/home/cdata/data1/docker/kafka/kafka3/docker.sock:/var/run/docker.sock"
      - "/home/cdata/data1/docker/kafka/kafka3/data/:/kafka"
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: unless-stopped
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links:            # containers created by this compose file
      - kafka1
      - kafka2
      - kafka3
    external_links:   # containers outside this compose file
      - zoo1
      - zoo2
      - zoo3
    environment:
      ZK_HOSTS: zoo1:2181,zoo2:2181,zoo3:2181 ## change if ZooKeeper is not reachable via these hostnames
      TZ: CST-8
```
Once both files are written, bring each one up with docker-compose up -d; that pulls the images and starts everything, after which kafka-manager is reachable in a browser on port 9000.
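Concretely, assuming the two files are saved as zookeeper.yml and kafka.yml (those names are my choice, not from the original setup):

```bash
# Run both from the same directory so the two stacks share the default
# compose network and the zoo1/zoo2/zoo3 hostnames resolve from the brokers
docker-compose -f zookeeper.yml up -d
docker-compose -f kafka.yml up -d

# All seven containers (zoo1-3, kafka1-3, kafka-manager) should show as Up
docker ps
```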
Click Add Cluster and fill in the matching settings: a cluster name plus the Cluster Zookeeper Hosts (zoo1:2181,zoo2:2181,zoo3:2181, the same value as ZK_HOSTS above).
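Topics can also be exercised directly with the CLI tools shipped in the wurstmeister/kafka image; a minimal sketch (the topic name test is just an example):

```bash
# Create a replicated topic and inspect its partition assignment;
# kafka-topics.sh is on the image's PATH
docker exec -it kafka1 kafka-topics.sh --create --zookeeper zoo1:2181 \
  --replication-factor 3 --partitions 3 --topic test
docker exec -it kafka1 kafka-topics.sh --describe --zookeeper zoo1:2181 --topic test
```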
Topic creation and queries work from the kafka-manager UI as well. Next up is Hadoop: install it on the host, edit the relevant configuration files, and start the NameNode and DataNode.
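A sketch of that last step, assuming a standard local Hadoop install with its bin/sbin directories on the PATH and core-site.xml/hdfs-site.xml already configured:

```bash
# One-time: format the NameNode metadata directory (wipes any existing HDFS data)
hdfs namenode -format

# Start the HDFS daemons (NameNode, DataNode, SecondaryNameNode)
start-dfs.sh

# jps should list NameNode and DataNode among the running JVMs
jps
```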
Then install HBase. Once it is installed, the core configuration is hbase-site.xml: point it at the ZooKeeper ensemble above (the same one Kafka registers with) and at the local HDFS, and everything is wired together.
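The original settings are not reproduced here; the snippet below is a sketch of the properties that matter for this layout. The HDFS port must match fs.defaultFS in core-site.xml (8020 is assumed here, since kafka-manager already occupies host port 9000), and note that older HBase releases expect plain hostnames in hbase.zookeeper.quorum plus a single shared hbase.zookeeper.property.clientPort, which would clash with the three different host-mapped ports:

```xml
<configuration>
  <!-- Where HBase stores its data; must match fs.defaultFS in core-site.xml
       (8020 assumed, as kafka-manager already binds host port 9000) -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://localhost:8020/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- The dockerized ensemble, reached through the host-mapped client ports;
       newer HBase accepts host:port pairs here -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>localhost:2184,localhost:2185,localhost:2186</value>
  </property>
</configuration>
```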
hbase-env.sh:
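Again a sketch rather than the original contents; the setting that matters with an external ensemble is HBASE_MANAGES_ZK (the JAVA_HOME path is a placeholder):

```bash
# Local JDK path (placeholder; adjust to your install)
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64

# Use the external (dockerized) ZooKeeper ensemble instead of letting
# HBase spawn and manage its own
export HBASE_MANAGES_ZK=false
```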