MySQL 8
version: '3.0'
services:
  mysql:
    image: mysql:8.0.32
    container_name: mysql
    restart: always
    environment:
      - TZ=Asia/Shanghai
      - MYSQL_ROOT_PASSWORD=123456
    ports:
      - 3306:3306
    volumes:
      - ./data:/var/lib/mysql
    command: --log-bin=mysql-bin --binlog-format=ROW
    networks:
      app-stack:
        aliases:
          - docker.mysql.db
networks:
  app-stack:
    external: true
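All of the files in this post attach to a pre-created external network (docker network create app-stack) and reach each other through the aliases defined on it. The --log-bin / ROW settings above are what the Debezium connector further down relies on; a quick check after connecting as root with the password from the compose file:

-- Confirm the command-line flags took effect
SHOW VARIABLES LIKE 'log_bin';        -- expect ON
SHOW VARIABLES LIKE 'binlog_format';  -- expect ROW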
Redis 7.0
version: '3.0'
services:
  redis:
    image: redis:7.0
    container_name: redis
    restart: always
    ports:
      - 6379:6379
    command: --requirepass 123456 --appendonly yes
    networks:
      app-stack:
        aliases:
          - docker.redis.db
networks:
  app-stack:
    external: true
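AOF persistence is enabled above, but no data directory is mounted, so the append-only file disappears with the container. A minimal sketch of the same service with the official image's /data directory persisted:

services:
  redis:
    image: redis:7.0
    command: --requirepass 123456 --appendonly yes
    volumes:
      - ./data:/data   # AOF/RDB files survive container recreation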
Elasticsearch 7.16
version: '3.0'
services:
  master-node:
    image: elasticsearch:7.16.3
    restart: always
    container_name: master-node
    environment:
      ELASTIC_PASSWORD: "123456"
      ES_JAVA_OPTS: "-Xms4g -Xmx4g"
      cluster.name: "aaron-cluster"
      node.name: "master-node"
      network.host: "0.0.0.0"
      network.publish_host: "docker.elastic.master"
      cluster.initial_master_nodes: "master-node"
      discovery.seed_hosts: "docker.elastic.master,docker.elastic.data1,docker.elastic.data2"
      http.cors.enabled: "true"
      http.cors.allow-origin: "*"
      xpack.security.enabled: "true"
    ports:
      - 9200:9200
    networks:
      app-stack:
        aliases:
          - docker.elastic.master
  data1-node:
    image: elasticsearch:7.16.3
    restart: always
    container_name: data1-node
    environment:
      ELASTIC_PASSWORD: "123456"
      ES_JAVA_OPTS: "-Xms4g -Xmx4g"
      cluster.name: "aaron-cluster"
      node.name: "data1-node"
      network.host: "0.0.0.0"
      network.publish_host: "docker.elastic.data1"
      cluster.initial_master_nodes: "master-node"
      discovery.seed_hosts: "docker.elastic.master,docker.elastic.data1,docker.elastic.data2"
      http.cors.enabled: "true"
      http.cors.allow-origin: "*"
      xpack.security.enabled: "true"
    depends_on:
      - master-node
    networks:
      app-stack:
        aliases:
          - docker.elastic.data1
  data2-node:
    image: elasticsearch:7.16.3
    restart: always
    container_name: data2-node
    environment:
      ELASTIC_PASSWORD: "123456"
      ES_JAVA_OPTS: "-Xms4g -Xmx4g"
      cluster.name: "aaron-cluster"
      node.name: "data2-node"
      network.host: "0.0.0.0"
      network.publish_host: "docker.elastic.data2"
      cluster.initial_master_nodes: "master-node"
      discovery.seed_hosts: "docker.elastic.master,docker.elastic.data1,docker.elastic.data2"
      http.cors.enabled: "true"
      http.cors.allow-origin: "*"
      xpack.security.enabled: "true"
    depends_on:
      - master-node
    networks:
      app-stack:
        aliases:
          - docker.elastic.data2
networks:
  app-stack:
    external: true
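Elasticsearch in Docker needs the host kernel setting vm.max_map_count raised to at least 262144, and with -Xms4g -Xmx4g each of the three nodes reserves 4 GB of heap. The nodes above also keep their indices inside the container; a sketch (one node shown, repeat for data1-node and data2-node) mounting the official image's default data path:

services:
  master-node:
    # ...same settings as above...
    volumes:
      - ./master-data:/usr/share/elasticsearch/data   # default data directory of the official image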
ELK 7.9.3
version: '3.0'
services:
  elk-es:
    image: elasticsearch:7.9.3
    container_name: elk-es
    ports:
      - 9200:9200
    environment:
      - ELASTIC_PASSWORD=elastic
      - xpack.security.enabled=true
      - discovery.type=single-node
      - network.host=0.0.0.0
      - http.cors.allow-origin=*
      - http.cors.enabled=true
    networks:
      app-stack:
        aliases:
          - docker.elk.es
  elk-kibana:
    image: kibana:7.9.3
    container_name: elk-kibana
    ports:
      - 5601:5601
    environment:
      - I18N_LOCALE=zh-CN
      - SERVER_NAME=kibana
      - SERVER_HOST=0.0.0.0
      - ELASTICSEARCH_HOSTS=http://docker.elk.es:9200
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=elastic
    networks:
      app-stack:
        aliases:
          - docker.elk.kibana
  logstash:
    image: logstash:7.9.3
    container_name: logstash
    ports:
      - 9600:9600
      - 9610:9610
    environment:
      - MONITORING_ELASTICSEARCH_HOSTS=http://docker.elk.es:9200
    volumes:
      - ./pipeline:/usr/share/logstash/pipeline
    networks:
      app-stack:
        aliases:
          - docker.elk.logstash
networks:
  app-stack:
    external: true
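The ./pipeline directory mounted into Logstash has to contain at least one pipeline definition. Port 9600 is Logstash's monitoring API; 9610 is presumably published for an input such as the TCP input sketched below. A minimal ./pipeline/logstash.conf, assuming JSON lines arriving over TCP and the elastic/elastic credentials of elk-es (index name is a placeholder):

input {
  tcp {
    port  => 9610
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts    => ["http://docker.elk.es:9200"]
    user     => "elastic"
    password => "elastic"
    index    => "app-logs-%{+YYYY.MM.dd}"
  }
}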
EMQX 4.3.9
version: '3.0'
services:
  emqx:
    image: emqx/emqx:4.3.9
    container_name: emqx
    environment:
      - EMQX_LOADED_PLUGINS="emqx_recon,emqx_retainer,emqx_management,emqx_dashboard"
      - EMQX_DASHBOARD__DEFAULT_USER__LOGIN=admin
      - EMQX_DASHBOARD__DEFAULT_USER__PASSWORD=123456
      - EMQX_MANAGEMENT__DEFAULT_APPLICATION__ID=admin
      - EMQX_MANAGEMENT__DEFAULT_APPLICATION__SECRET=123456
      # - EMQX_AUTH__MYSQL__SERVER=aaron.mysql
      # - EMQX_AUTH__MYSQL__USERNAME=root
      # - EMQX_AUTH__MYSQL__PASSWORD=123456
    ports:
      - 18083:18083
      - 1883:1883
    networks:
      - app-stack
networks:
  app-stack:
    external: true
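The commented-out entries hint at MySQL-backed authentication. A sketch of those settings, assuming EMQX 4.x's emqx_auth_mysql plugin and the MySQL compose from the top of this post (alias docker.mysql.db); the mqtt database and its mqtt_user/mqtt_acl tables are the plugin's defaults and have to be created beforehand:

    environment:
      # load the auth plugin in addition to the ones above
      - EMQX_LOADED_PLUGINS="emqx_auth_mysql,emqx_recon,emqx_retainer,emqx_management,emqx_dashboard"
      - EMQX_AUTH__MYSQL__SERVER=docker.mysql.db:3306
      - EMQX_AUTH__MYSQL__USERNAME=root
      - EMQX_AUTH__MYSQL__PASSWORD=123456
      - EMQX_AUTH__MYSQL__DATABASE=mqtt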
Kafka 3.4
version: "3.0"
services:
zookeeper:
image: bitnami/zookeeper:3.8
ports:
- 2181:2181
volumes:
- zookeeper_data:/bitnami
extra_hosts:
vmserver: 192.168.31.78
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
networks:
app-stack:
aliases:
- docker.zookeeper
kafka:
image: bitnami/kafka:3.4
ports:
- 9092:9092
volumes:
- kafka_data:/bitnami
extra_hosts:
vmserver: 192.168.31.78
environment:
- KAFKA_CFG_BROKER_ID=1001
- KAFKA_CFG_RESERVED_BROKER_MAX_ID=1002
- KAFKA_CFG_ZOOKEEPER_CONNECT=docker.zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://vmserver:9092
- KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092
depends_on:
- zookeeper
networks:
app-stack:
aliases:
- docker.mq.kafka
volumes:
zookeeper_data:
driver: local
kafka_data:
driver: local
networks:
app-stack:
external: true
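With a single PLAINTEXT listener, every client — including containers on app-stack — is told to connect to vmserver:9092, which only works here because extra_hosts maps vmserver to the host IP and port 9092 is published. A sketch, assuming the bitnami image's KAFKA_CFG_ prefix mapping onto broker properties, that splits internal and external traffic so in-network clients can use the docker.mq.kafka alias directly:

    environment:
      - KAFKA_CFG_LISTENERS=INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://docker.mq.kafka:9093,EXTERNAL://vmserver:9092
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL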
RabbitMQ
version: '3.0'
services:
  rabbitmq:
    image: rabbitmq:3-management
    container_name: rabbitmq
    environment:
      - RABBITMQ_DEFAULT_USER=admin
      - RABBITMQ_DEFAULT_PASS=admin_123456
    ports:
      - 5672:5672
    networks:
      app-stack:
        aliases:
          - docker.rabbitmq
networks:
  app-stack:
    external: true
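The rabbitmq:3-management image also serves the management console on port 15672, which the file above does not publish. One extra mapping makes it reachable:

    ports:
      - 5672:5672
      - 15672:15672   # management UI, log in with admin / admin_123456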
PostgreSQL 14.1
version: '3.0'
services:
  postgres:
    image: postgres:14.1
    container_name: postgres
    environment:
      - POSTGRES_USER=root
      - POSTGRES_PASSWORD=123456
    volumes:
      - ./data:/var/lib/postgresql/data
    ports:
      - 5432:5432
    networks:
      - app-stack
networks:
  app-stack:
    external: true
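If this instance is meant to feed the Debezium Connect service in the next section, logical decoding has to be switched on. A sketch, relying on the official image passing command-line arguments straight through to the postgres process:

services:
  postgres:
    image: postgres:14.1
    command: -c wal_level=logical   # required by Debezium's PostgreSQL connector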
Debezium-connect 2.1.2
version: '3.0'
services:
  connect:
    image: debezium/connect:2.1.2.Final
    container_name: connect
    ports:
      - 8083:8083
    environment:
      - BOOTSTRAP_SERVERS=docker.mq.kafka:9092
      - GROUP_ID=1
      - CONFIG_STORAGE_TOPIC=debezium_connect_configs
      - OFFSET_STORAGE_TOPIC=debezium_connect_offsets
      - STATUS_STORAGE_TOPIC=debezium_connect_statuses
      - CONNECT_KEY_CONVERTER_SCHEMAS_ENABLE=false     # key schemas are not needed
      - CONNECT_VALUE_CONVERTER_SCHEMAS_ENABLE=false   # value schemas are not needed
    networks:
      app-stack:
        aliases:
          - docker.debezium
  connect-ui:
    image: debezium/debezium-ui:2.1.2.Final
    container_name: connect-ui
    environment:
      - kafka.connect.uris=http://docker.debezium:8083
    ports:
      - 18080:8080
    networks:
      app-stack:
        aliases:
          - docker.debezium.ui
networks:
  app-stack:
    external: true
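With the MySQL service from the top of this post (binlog in ROW mode) and the Kafka broker on the same network, a source connector is registered by POSTing JSON to the Connect REST API on port 8083 (the /connectors endpoint). A sketch of such a payload for Debezium 2.x, assuming the docker.mysql.db alias and credentials from the MySQL compose; the server id, topic prefix, and database name are placeholders to adapt:

{
  "name": "mysql-source",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "docker.mysql.db",
    "database.port": "3306",
    "database.user": "root",
    "database.password": "123456",
    "database.server.id": "184054",
    "topic.prefix": "mysql8",
    "database.include.list": "demo",
    "schema.history.internal.kafka.bootstrap.servers": "docker.mq.kafka:9092",
    "schema.history.internal.kafka.topic": "schema-changes.demo"
  }
}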
Nacos 2.0.3
version: '3.0'
services:
  nacos:
    image: nacos/nacos-server:v2.0.3
    container_name: nacos
    environment:
      - MODE=standalone
    ports:
      - 8848:8848
    networks:
      - app-stack
networks:
  app-stack:
    external: true
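Nacos 2.x clients additionally communicate over gRPC on the main port plus 1000 (and plus 1001 for server-to-server sync). When clients run outside the app-stack network, those ports need to be published alongside 8848:

    ports:
      - 8848:8848
      - 9848:9848   # gRPC port used by Nacos 2.x clients (8848 + 1000)
      - 9849:9849   # gRPC port for server-to-server sync (8848 + 1001)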
ClickHouse 21.11.2.2
version: '3.0'
services:
  clickhouse:
    image: yandex/clickhouse-server:21.11.2.2
    container_name: clickhouse
    ports:
      - 8123:8123
    volumes:
      - ./data:/var/lib/clickhouse
      - ./config/config.xml:/etc/clickhouse-server/config.xml
      - ./users.d:/etc/clickhouse-server/users.d/
    networks:
      app-stack:
        aliases:
          - docker.clickhouse
networks:
  app-stack:
    external: true
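Only the HTTP interface (8123) is published above; clickhouse-client and most native drivers use the TCP protocol on port 9000. One more mapping exposes it:

    ports:
      - 8123:8123   # HTTP interface
      - 9000:9000   # native TCP interface used by clickhouse-client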
User configuration template (dropped into the mounted users.d directory)
<?xml version="1.0"?>
<clickhouse>
    <users>
        <aaron>
            <password>123456</password>
            <networks>
                <ip>::/0</ip>
            </networks>
            <quota>default</quota>
            <access_management>1</access_management>
        </aaron>
    </users>
</clickhouse>
Shard section of config.xml (optional); here a single machine with a single shard.
<remote_servers>
    <!-- Test only shard config for testing distributed storage -->
    <test_shard_localhost>
        <shard>
            <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
            <!-- <internal_replication>false</internal_replication> -->
            <!-- Optional. Shard weight when writing data. Default: 1. -->
            <!-- <weight>1</weight> -->
            <replica>
                <host>localhost</host>
                <port>9000</port>
                <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
                <!-- <priority>1</priority> -->
            </replica>
        </shard>
    </test_shard_localhost>
</remote_servers>
clickhouse-jdbc-bridge
version: "3.0"
# Only 'ch-server' and 'jdbc-bridge' are mandatory.
# You may remove any db-xxx to save memory.
services:
jdbc-bridge:
image: clickhouse/jdbc-bridge:2.0
hostname: jdbc-bridge
# In general you don't need to define any environment variable
# Below are all default settings just for demonstration
environment:
CONFIG_DIR: config # configuration directory
SERIAL_MODE: "false" # whether run query in serial mode or not
HTTPD_CONFIG_FILE: httpd.json # httpd configuration file
SERVER_CONFIG_FILE: server.json # server configuration file
VERTX_CONFIG_FILE: vertx.json # vertx configuration file
DATASOURCE_CONFIG_DIR: datasources # named datasource directory
DRIVER_DIR: drivers # driver directory
EXTENSION_DIR: extensions # extension directory
QUERY_CONFIG_DIR: queries # named query directory
CUSTOM_DRIVER_LOADER: "true" # whether use custom driver loader or not
JDBC_BRIDGE_JVM_OPTS: # use CPU and memory allocated by container
# You may want to keep datasources, queries, SQL scripts, and maybe drivers in a git repo
volumes:
- ./drivers:/app/drivers
mem_limit: 512m
restart: always
networks:
app-stack:
aliases:
- docker.jdbc.bridge
networks:
app-stack:
external: true
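For the JDBC table engine below to work, the ClickHouse server has to be told where the bridge runs (its default port is 9019), and the matching JDBC driver jars go into the mounted ./drivers directory. A sketch of the config.xml fragment, assuming the docker.jdbc.bridge alias defined above:

<clickhouse>
    <jdbc_bridge>
        <host>docker.jdbc.bridge</host>
        <port>9019</port>
    </jdbc_bridge>
</clickhouse>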
CREATE TABLE jdbc_table
(
    `id`   Int32,
    `name` Nullable(String)
)
ENGINE = JDBC('jdbc:mysql://192.168.31.233:3306/?user=root&password=123456', 'chenzhou', 'users');

SELECT * FROM jdbc_table;

CREATE TABLE oracle_table
(
    `ID`   String,
    `NAME` Nullable(String)
)
ENGINE = JDBC('jdbc:oracle:thin:@oracle:1521/ORCL?user=C##flinkuser&password=flinkpw', 'C##FLINKUSER', 'DEMO');
DataX-Web
version: "3.7"
services:
datax-web:
expose:
- "9504"
- "9527"
- "9999"
restart: always
image: eurekas/datax-web:standalone-1.1
container_name: datax-web
ports:
- 9504:9504
- 9527:9527
- 9999:9999
depends_on:
- mysql
links:
- "mysql:mysql"
mysql:
image: eurekas/datax-web-mysql:1.0
expose:
- "3306"
restart: always
container_name: mysql
ports:
- 13306:3306
environment:
- "MYSQL_ROOT_PASSWORD=1q2w3e4r5"