一、docker-compose for node 1

version: '3.1'
services:
  zoo1:
    image: docker.io/zookeeper:3.7.0
    restart: always
    container_name: pro-zookeeper-001
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /disk2/kafka/zookeeper-data:/data
      - /disk2/kafka/zookeeper-datalog:/datalog
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=192.168.150.15:2888:3888;2181 server.3=192.168.150.16:2888:3888;2181

  kafka1:
    image: docker.io/wurstmeister/kafka:2.12-2.5.0
    restart: always
    container_name: pro-kafka-001
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.150.14
      KAFKA_HOST_NAME: 192.168.150.14
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 192.168.150.14:2181,192.168.150.15:2181,192.168.150.16:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.150.14:9092
      KAFKA_LISTENERS: PLAINTEXT://:9092
    volumes:
      - /disk2/kafka/kafka-data:/kafka
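Once the three ZooKeeper containers are up, the ensemble can be checked from any host. A minimal sketch, assuming nc is installed on the host and that the srvr four-letter command is whitelisted (it is by default in ZooKeeper 3.5+); expect one leader and two followers:

    # Query each ZooKeeper node for its role in the ensemble
    for ip in 192.168.150.14 192.168.150.15 192.168.150.16; do
        echo "--- $ip ---"
        echo srvr | nc "$ip" 2181 | grep Mode
    done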

二、docker-compose for node 2

version: '3.1'
services:
  zoo2:
    image: docker.io/zookeeper:3.7.0
    restart: always
    container_name: pro-zookeeper-002
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /disk2/kafka/zookeeper-data:/data
      - /disk2/kafka/zookeeper-datalog:/datalog
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=192.168.150.14:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=192.168.150.16:2888:3888;2181

  kafka2:
    image: docker.io/wurstmeister/kafka:2.12-2.5.0
    restart: always
    container_name: pro-kafka-002
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.150.15
      KAFKA_HOST_NAME: 192.168.150.15
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 2
      KAFKA_ZOOKEEPER_CONNECT: 192.168.150.14:2181,192.168.150.15:2181,192.168.150.16:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.150.15:9092
      KAFKA_LISTENERS: PLAINTEXT://:9092
    volumes:
      - /disk2/kafka/kafka-data:/kafka
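The ZOO_SERVERS entries follow the format server.<id>=<host>:<peer-port>:<election-port>;<client-port>, with the local node written as 0.0.0.0. After the Kafka containers start, broker registration can be verified through ZooKeeper; a sketch assuming zkCli.sh is on the PATH inside the official zookeeper image (it is by default):

    # List the broker IDs registered in ZooKeeper; expect [1, 2, 3]
    docker exec pro-zookeeper-001 zkCli.sh -server localhost:2181 ls /brokers/ids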

三、docker-compose for node 3

version: '3.1'
services:
  zoo3:
    image: docker.io/zookeeper:3.7.0
    restart: always
    container_name: pro-zookeeper-003
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /disk2/kafka/zookeeper-data:/data
      - /disk2/kafka/zookeeper-datalog:/datalog
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=192.168.150.14:2888:3888;2181 server.2=192.168.150.15:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181

  kafka3:
    image: docker.io/wurstmeister/kafka:2.12-2.5.0
    restart: always
    container_name: pro-kafka-003
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.150.16
      KAFKA_HOST_NAME: 192.168.150.16
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 3
      KAFKA_ZOOKEEPER_CONNECT: 192.168.150.14:2181,192.168.150.15:2181,192.168.150.16:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.150.16:9092
      KAFKA_LISTENERS: PLAINTEXT://:9092
    volumes:
      - /disk2/kafka/kafka-data:/kafka
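With all three brokers running, a quick smoke test is to create a replicated topic and describe it. A sketch assuming kafka-topics.sh is on the container's PATH (the wurstmeister image ships the Kafka CLI tools); the topic name is arbitrary:

    # Create a topic replicated across all three brokers
    docker exec pro-kafka-001 kafka-topics.sh --bootstrap-server 192.168.150.14:9092 \
        --create --topic smoke-test --partitions 3 --replication-factor 3

    # Check that partition leaders and ISRs are spread over brokers 1, 2 and 3
    docker exec pro-kafka-001 kafka-topics.sh --bootstrap-server 192.168.150.14:9092 \
        --describe --topic smoke-test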

四、Deployment procedure

  • 1. Create the data directories on each node.

  • 2. On each node, comment out the volume mounts in the compose file for now and start the containers first.

  • 3. Use docker cp to copy the data out of the containers onto the host; repeat the same commands on every node:

    docker cp pro-zookeeper-001:/data /disk2/kafka/zookeeper-data
    docker cp pro-zookeeper-001:/datalog /disk2/kafka/zookeeper-datalog
    docker cp pro-kafka-001:/kafka /disk2/kafka/kafka-data
    
  • 4. Remove the containers, re-enable the volume mounts, and start again; a shell sketch of the whole procedure follows this list.
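
Put together for node 1, the procedure looks roughly like the sketch below (container names and paths as above; nodes 2 and 3 are identical apart from their container names, and docker-compose is assumed to be run from the directory holding the compose file):

    # 1. Create the data directories on this node
    mkdir -p /disk2/kafka/zookeeper-data /disk2/kafka/zookeeper-datalog /disk2/kafka/kafka-data

    # 2. With the volumes: entries still commented out, start the containers once
    docker-compose up -d

    # 3. Copy the initial data out of the containers onto the host
    docker cp pro-zookeeper-001:/data /disk2/kafka/zookeeper-data
    docker cp pro-zookeeper-001:/datalog /disk2/kafka/zookeeper-datalog
    docker cp pro-kafka-001:/kafka /disk2/kafka/kafka-data

    # 4. Remove the containers, re-enable the volume mounts, and start again
    docker-compose down
    # (uncomment the volumes: sections in docker-compose.yml here)
    docker-compose up -d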