包含单机版或集群部署,相关参数可自行调整
安装方式
1 2 3 4 5 6 7 8 9 10 11 12 13 docker compose -f docker-compose.yaml up -d docker compose -p demo -f docker-compose.yaml up -d docker compose -p demo -f docker-compose.yaml stop docker compose -p demo -f docker-compose.yaml down docker compose -p demo -f docker-compose.yaml down --rmi all
Nacos
单机模式(Standalone)
使用内嵌 Derby 数据库(不推荐生产)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 version: '3' services: nacos: image: nacos/nacos-server:v2.5.2 container_name: nacos ports: - "8848:8848" - "9848:9848" - "9849:9849" environment: - MODE=standalone - NACOS_AUTH_ENABLE=true - PREFER_HOST_MODE=hostname - NACOS_AUTH_IDENTITY_KEY=nacos - NACOS_AUTH_IDENTITY_VALUE=nacos - NACOS_AUTH_TOKEN=VGhpc0lzTXlDdXN0b21TZWNyZXRLZXkwMTIzNDU2Nzg= - TZ=Asia/Shanghai - JVM_XMX=1g - JVM_XMS=1g volumes: - ./data/nacos/logs:/data/nacos/logs restart: "no"
启动后访问:http://localhost:8848/nacos 默认账号密码:nacos / nacos
集群模式(Cluster)
必须使用外置 MySQL(5.7+ 或 8.0) ,且需初始化 SQL
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 version: '3.8' services: mysql: image: mysql:8.0 container_name: mysql8 environment: - MYSQL_ROOT_PASSWORD=123456 - MYSQL_DATABASE=nacos - MYSQL_USER=nacos - MYSQL_PASSWORD=nacos volumes: - ./data/mysql/init.sql:/docker-entrypoint-initdb.d/nacos-mysql.sql - ./data/mysql/data:/var/lib/mysql ports: - "3306:3306" command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci restart: unless-stopped nacos1: image: nacos/nacos-server:v2.5.2 container_name: nacos-8848 depends_on: - mysql environment: - MODE=cluster - NACOS_SERVERS=nacos1:8848 nacos2:8848 nacos3:8848 - SPRING_DATASOURCE_PLATFORM=mysql - MYSQL_SERVICE_HOST=mysql - MYSQL_SERVICE_PORT=3306 - MYSQL_SERVICE_USER=nacos - MYSQL_SERVICE_PASSWORD=nacos - MYSQL_SERVICE_DB_NAME=nacos - PREFER_HOST_MODE=hostname ports: - "8848:8848" - "9848:9848" - "9849:9849" volumes: - ./data/nacos-8848/logs:/home/nacos/logs restart: unless-stopped nacos2: image: nacos/nacos-server:v2.5.2 container_name: nacos-8849 depends_on: - mysql environment: - MODE=cluster - NACOS_SERVERS=nacos1:8848 nacos2:8848 nacos3:8848 - SPRING_DATASOURCE_PLATFORM=mysql - MYSQL_SERVICE_HOST=mysql - MYSQL_SERVICE_PORT=3306 - MYSQL_SERVICE_USER=nacos - MYSQL_SERVICE_PASSWORD=nacos - MYSQL_SERVICE_DB_NAME=nacos - PREFER_HOST_MODE=hostname ports: - "8849:8848" - "9850:9848" - "9851:9849" volumes: - ./data/nacos-8849/logs:/home/nacos/logs restart: unless-stopped nacos3: image: nacos/nacos-server:v2.5.2 container_name: nacos-8850 depends_on: - mysql environment: - MODE=cluster - NACOS_SERVERS=nacos1:8848 nacos2:8848 nacos3:8848 - SPRING_DATASOURCE_PLATFORM=mysql - MYSQL_SERVICE_HOST=mysql - 
MYSQL_SERVICE_PORT=3306 - MYSQL_SERVICE_USER=nacos - MYSQL_SERVICE_PASSWORD=nacos - MYSQL_SERVICE_DB_NAME=nacos - PREFER_HOST_MODE=hostname ports: - "8850:8848" - "9852:9848" - "9853:9849" volumes: - ./data/nacos-8850/logs:/home/nacos/logs restart: unless-stopped volumes: mysql_data: nacos1_logs: nacos2_logs: nacos3_logs:
访问任意节点:
默认账号密码:nacos / nacos
RocketMQ
单机版
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 version: '3.3' services: namesrv: image: apache/rocketmq:5.3.4 container_name: rmqnamesrv ports: - 9876 :9876 networks: - rocketmq command: sh mqnamesrv broker: image: apache/rocketmq:5.3.4 container_name: rmqbroker ports: - 10909 :10909 - 10911 :10911 - 10912 :10912 environment: - NAMESRV_ADDR=rmqnamesrv:9876 depends_on: - namesrv networks: - rocketmq command: sh mqbroker proxy: image: apache/rocketmq:5.3.4 container_name: rmqproxy networks: - rocketmq depends_on: - broker - namesrv ports: - 8081 :8081 - 8082 :8082 restart: no environment: - NAMESRV_ADDR=rmqnamesrv:9876 command: sh mqproxy networks: rocketmq: driver: bridge
Kafka
单机版(KRaft模式)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 version: "3.8" services: kafka: image: confluentinc/cp-kafka:latest container_name: kafka ports: - "9092:9092" - "19092:19092" - "9093:9093" volumes: - type: volume source: kafka_standalone_data target: /data/kafka read_only: false environment: CLUSTER_ID: mk-0kz5wGoTQ6eN6vDPN2eX2A KAFKA_PROCESS_ROLES: broker,controller KAFKA_NODE_ID: 1 KAFKA_CONTROLLER_QUORUM_VOTERS: 1 @localhost:9093 KAFKA_LISTENERS: INTERNAL://0.0.0.0:19092,EXTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:19092,EXTERNAL://localhost:9092 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER KAFKA_LOG_DIRS: /tmp/kraft-combined-logs KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_AUTO_CREATE_TOPICS_ENABLE: true KAFKA_NUM_PARTITIONS: 3 KAFKA_DEFAULT_REPLICATION_FACTOR: 1 KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1 @127.0.0.1:9094 ALLOW_PLAINTEXT_LISTENER: yes JMX_PORT: 9998 KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka -Dcom.sun.management.jmxremote.rmi.port=9998 healthcheck: test: ["CMD-SHELL" , "kafka-broker-api-versions --bootstrap-server localhost:9092" ] interval: 30s timeout: 10s retries: 3 networks: - kafka-network kafka-ui: container_name: kafka-ui image: provectuslabs/kafka-ui:latest ports: - "9090:8080" depends_on: - kafka environment: KAFKA_CLUSTERS_0_NAME: kafka-stand-alone KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:19092 KAFKA_CLUSTERS_0_READONLY: false 
KAFKA_CLUSTERS_0_METRICS_PORT: 9998 SERVER_SERVLET_CONTEXT_PATH: /kafkaui AUTH_TYPE: disabled SPRING_SECURITY_USER_NAME: admin SPRING_SECURITY_USER_PASSWORD: 123456 DYNAMIC_CONFIG_ENABLED: 'true' networks: - kafka-network restart: unless-stopped volumes: kafka_standalone_data: driver: local networks: kafka-network: driver: bridge
MQTT 是一种基于标准的消息传递协议或规则集,用于机器对机器的通信。智能传感器、可穿戴设备和其他物联网(IoT)设备通常必须通过带宽有限的资源受限网络传输和接收数据。这些物联网设备使用 MQTT 进行数据传输,因为它易于实施,并且可以有效地传输物联网数据。MQTT 支持设备到云端和云端到设备之间的消息传递。
轻量、高效
IoT 设备上的 MQTT 实施需要最少的资源,因此它甚至可以用于小型微控制器。例如,最小的 MQTT 控制消息可以少至两个数据字节。MQTT 消息的标头也很小,因此您可以优化网络带宽。
可扩展
MQTT 实施需要最少的代码,在操作中消耗的功率非常少。该协议还具有支持与大量物联网设备通信的内置功能。因此,您可以实施 MQTT 协议来连接数百万台此类设备。
可靠
许多 IoT 设备通过低带宽、高延迟的不可靠蜂窝网络连接。MQTT 具有内置功能,可减少 IoT 设备重新连接云所需的时间。它还定义了三种不同的服务质量级别,以确保 IoT 用例的可靠性——最多一次(0)、至少一次(1)和恰好一次(2)。
安全
MQTT 使开发人员可以轻松地使用现代身份验证协议(例如 OAuth、TLS1.3、客户管理的证书等)加密消息并对设备和用户进行身份验证。
单机版
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 services: mosquitto: image: eclipse-mosquitto:2.0 container_name: mqtt hostname: mosquitto restart: no ports: - "1883:1883" - "9001:9001" - "8083:8083" volumes: - ./conf/mosquitto.conf:/mosquitto/config/mosquitto.conf - ./data/mosquitto:/mosquitto/data - ./logs/mosquitto:/mosquitto/log networks: - mqtt-network environment: - TZ=Asia/Shanghai deploy: resources: limits: memory: 512M cpus: '1' reservations: memory: 256M healthcheck: test: ["CMD" , "mosquitto" , "-c" , "/mosquitto/config/mosquitto.conf" ] interval: 30s timeout: 10s retries: 3 node-red: image: nodered/node-red:latest container_name: mqtt-node-red ports: - "1880:1880" volumes: - ./data/mosquitto/node-red-data:/data restart: no networks: mqtt-network: driver: bridge
配置
mosquitto.conf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 # Basic configuration # TCP 监听器 listener 1883 0.0 .0 .0 protocol mqtt # 允许匿名连接 allow_anonymous true # Security # password_file /mosquitto/config/password.txt # Persistence persistence true persistence_location /mosquitto/data/ autosave_interval 1800 # 兼容性设置 connection_messages true # 协议版本兼容性 allow_zero_length_clientid true auto_id_prefix true log_timestamp true # Logging log_dest file /mosquitto/log/mosquitto.log log_type error log_dest stdout log_type all # Limits max_connections 1000 max_keepalive 300 max_packet_size 1024 # WebSocket support listener 9001 0.0 .0 .0 protocol websockets
Skywalking
skywalking 包含的所有组件
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 version: '3.3' services: elasticsearch: image: elasticsearch:7.14.2 container_name: elasticsearch restart: always ports: - 9200 :9200 environment: - "TAKE_FILE_OWNERSHIP=true" - "discovery.type=single-node" - "TZ=Asia/Shanghai" - "ES_JAVA_OPTS=-Xms512m -Xmx512m" volumes: - ./elasticsearch/logs:/usr/share/elasticsearch/logs - ./elasticsearch/data:/usr/share/elasticsearch/data - ./elasticsearch/conf/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml ulimits: memlock: soft: -1 hard: -1 skywalking-oap-server: image: apache/skywalking-oap-server:8.9.1 container_name: skywalking-oap-server depends_on: - elasticsearch links: - elasticsearch restart: always ports: - 11800 :11800 - 12800 :12800 environment: SW_STORAGE: elasticsearch SW_STORAGE_ES_CLUSTER_NODES: elasticsearch:9200 TZ: Asia/Shanghai volumes: - ./oap/conf/alarm-settings.yml:/skywalking/config/alarm-settings.yml skywalking-ui: image: apache/skywalking-ui:8.9.1 container_name: skywalking-ui depends_on: - skywalking-oap-server links: - skywalking-oap-server restart: always ports: - 8080 :8080 environment: SW_OAP_ADDRESS: http://skywalking-oap-server:12800 TZ: Asia/Shanghai
Prometheus
prometheus 包含的所有组件
单机版
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 version: "3" networks: net-prometheus: driver: bridge services: prometheus: image: prom/prometheus container_name: prometheus user: root environment: - TZ=Asia/Shanghai restart: no ports: - "9090:9090" volumes: - ./data/prometheus/conf:/etc/prometheus - ./data/prometheus/data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' networks: - net-prometheus grafana: image: grafana/grafana container_name: grafana user: root environment: - TZ=Asia/Shanghai restart: no ports: - "3000:3000" volumes: - ./data/prometheus/grafana/data:/var/lib/grafana depends_on: - prometheus networks: - net-prometheus pushgateway: image: prom/pushgateway container_name: pushgateway user: root environment: - TZ=Asia/Shanghai restart: no ports: - "9091:9091" volumes: - ./data/prometheus/pushgateway/data:/var/lib/pushgateway networks: - net-prometheus alertmanager: image: prom/alertmanager hostname: alertmanager container_name: alertmanager user: root environment: - TZ=Asia/Shanghai restart: no ports: - "9093:9093" volumes: - ./data/prometheus/alertmanager/data:/var/lib/alertmanager networks: - net-prometheus node_exporter: image: prom/node-exporter container_name: node_exporter volumes: - './data/prometheus:/host:ro,rslave' ports: - 9100 :9100 environment: TZ: Asia/Shanghai command: - '--path.rootfs=/host' labels: org.label-schema.group: "monitoring" restart: no networks: - net-prometheus cadvisor: image: google/cadvisor:latest container_name: cadvisor hostname: cadvisor 
restart: no volumes: - ./data/prometheus/:/rootfs:ro - ./data/prometheus/var/run:/var/run:rw - ./data/prometheus/sys:/sys:ro - ./data/prometheus/var/lib/docker/:/var/lib/docker:ro ports: - '8080:8080' networks: - net-prometheus
配置
prometheus.yml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 global: scrape_interval: 15s evaluation_interval: 15s external_labels: monitor: 'dashboard' alerting: alert_relabel_configs: - source_labels: [dc ] regex: (.+)\d+ target_label: dc1 alertmanagers: - static_configs: - targets: ['192.168.1.115:9093' ] rule_files: scrape_configs: - job_name: 'prometheus' scrape_interval: 5s static_configs: - targets: ['prometheus:9090' ] - job_name: 'node-exporter' static_configs: - targets: ['192.168.1.115:9100' ,'192.168.1.115:9091' ]
Redis
单机版
注意 :确保宿主机 redis.conf 是文件且存在 ,否则会创建为目录
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 services: redis: image: redis:6-alpine container_name: redis hostname: redis restart: no ports: - "6379:6379" volumes: - ./data/redis/redis.conf:/usr/local/etc/redis/redis.conf - ./data/redis/data:/data command: redis-server /usr/local/etc/redis/redis.conf networks: - redis-net deploy: resources: limits: memory: 1G reservations: memory: 256M volumes: redis_data: driver: local networks: redis-net: driver: bridge
集群版
基于 Docker Compose 的 Redis 集群(3 主 3 从,共 6 个节点)配置示例
至少 3 个主节点 才能实现故障转移,通常搭配 3 个从节点(共 6 节点)以保证高可用
注意 :确保宿主机 redis.conf 是文件且存在 ,否则会创建为目录
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 version: '3.8' services: redis-node-1: image: redis:7.2-alpine container_name: redis-node-1 command: redis-server --port 7000 --bind 0.0 .0 .0 --cluster-enabled yes --cluster-announce-ip host.docker.internal --cluster-announce-port 7000 --cluster-announce-bus-port 17000 --cluster-config-file nodes.conf --cluster-node-timeout 5000 ports: - "7000:7000" - "17000:17000" volumes: - ./data/redis-01/data:/data networks: redis-net: ipv4_address: 172.28 .0 .10 redis-node-2: image: redis:7.2-alpine container_name: redis-node-2 command: redis-server --port 7001 --bind 0.0 .0 .0 --cluster-enabled yes --cluster-announce-ip host.docker.internal --cluster-announce-port 7001 --cluster-announce-bus-port 17001 --cluster-config-file nodes.conf --cluster-node-timeout 5000 ports: - "7001:7001" - "17001:17001" volumes: - ./data/redis-02/data:/data networks: redis-net: ipv4_address: 172.28 .0 .11 redis-node-3: image: redis:7.2-alpine container_name: redis-node-3 command: redis-server --port 7002 --bind 0.0 .0 .0 --cluster-enabled yes --cluster-announce-ip host.docker.internal --cluster-announce-port 7002 --cluster-announce-bus-port 17002 --cluster-config-file nodes.conf --cluster-node-timeout 5000 ports: - "7002:7002" - "17002:17002" volumes: - ./data/redis-03/data:/data networks: redis-net: ipv4_address: 172.28 .0 .12 redis-node-4: image: redis:7.2-alpine container_name: redis-node-4 command: redis-server --port 7003 --bind 0.0 .0 .0 --cluster-enabled yes --cluster-announce-ip host.docker.internal --cluster-announce-port 7003 
--cluster-announce-bus-port 17003 --cluster-config-file nodes.conf --cluster-node-timeout 5000 ports: - "7003:7003" - "17003:17003" volumes: - ./data/redis-04/data:/data networks: redis-net: ipv4_address: 172.28 .0 .13 redis-node-5: image: redis:7.2-alpine container_name: redis-node-5 command: redis-server --port 7004 --bind 0.0 .0 .0 --cluster-enabled yes --cluster-announce-ip host.docker.internal --cluster-announce-port 7004 --cluster-announce-bus-port 17004 --cluster-config-file nodes.conf --cluster-node-timeout 5000 ports: - "7004:7004" - "17004:17004" volumes: - ./data/redis-05/data:/data networks: redis-net: ipv4_address: 172.28 .0 .14 redis-node-6: image: redis:7.2-alpine container_name: redis-node-6 command: redis-server --port 7005 --bind 0.0 .0 .0 --cluster-enabled yes --cluster-announce-ip host.docker.internal --cluster-announce-port 7005 --cluster-announce-bus-port 17005 --cluster-config-file nodes.conf --cluster-node-timeout 5000 ports: - "7005:7005" - "17005:17005" volumes: - ./data/redis-06/data:/data networks: redis-net: ipv4_address: 172.28 .0 .15 networks: redis-net: driver: bridge ipam: config: - subnet: 172.28 .0 .0 /24
所有节点使用 host.docker.internal 作为 cluster-announce-ip,因为客户端从宿主机连接。如果你从其他机器访问,请将其改为宿主机的实际 IP 。
每个节点暴露两个端口:客户端端口(700x) 和 集群总线端口(1700x) 。
--appendonly yes 开启 AOF 持久化(可选)。
使用固定 IP 网络(172.28.0.0/24)避免容器重启后 IP 变化导致集群配置失效。
初始化集群(首次部署时运行一次),进入任意节点容器,运行集群创建命令:
1 2 3 4 5 6 7 8 docker exec -it redis-node-1 redis-cli --cluster create \ redis-node-1:7000 \ redis-node-2:7001 \ redis-node-3:7002 \ redis-node-4:7003 \ redis-node-5:7004 \ redis-node-6:7005 \ --cluster-replicas 1
--cluster-replicas 1 表示每个主节点有 1 个从节点(共 3 主 3 从)。
如果Network 出错,说明你在 docker-compose.yml 中指定的自定义子网(如 172.20.0.0/24)和 Docker 主机上已存在的某个网络(Docker 自动创建的或手动创建的)发生了 IP 地址冲突 :
✘ Network redis_redis-net Error 0.0s
failed to create network redis_redis-net: Error response from daemon: invalid pool request: Pool overlaps with other one on this address space
更换子网(推荐) :- subnet: 172.28.0.0/24 # 改成这个(或其他未用网段)
如果一直卡在:
Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
这是因为节点间网络不通,必须从宿主机执行 --cluster create,但需使用 host.docker.internal
切记:Windows Docker Desktop 不支持 --network host
1 2 3 4 5 docker run --rm -it --network host redis:7.2-alpine redis-cli --cluster create 127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 --cluster-replicas 1 docker exec -it redis-node-1 redis-cli --cluster create redis-node-1:7000 redis-node-2:7001 redis-node-3:7002 redis-node-4:7003 redis-node-5:7004 redis-node-6:7005 --cluster-replicas 1
Elasticsearch
完整的 Elasticsearch + Kibana(Dashboard) 的 docker-compose.yml 配置示例
单机 + Dashboard
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 version: '3.8' services: elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:7.17.29 container_name: es-single environment: - node.name=es-single-node - discovery.type=single-node - xpack.security.enabled=false - xpack.security.http.ssl.enabled=false - xpack.security.transport.ssl.enabled=false - ELASTIC_PASSWORD=123456 - "ES_JAVA_OPTS=-Xms1g -Xmx1g" ulimits: memlock: soft: -1 hard: -1 volumes: - ./data/elasticsearch/data:/usr/share/elasticsearch/data - ./data/elasticsearch/config:/usr/share/elasticsearch/config - ./data/elasticsearch/plugins:/usr/share/elasticsearch/plugins ports: - "9200:9200" - "9300:9300" networks: - es-net kibana: image: docker.elastic.co/kibana/kibana:7.17.29 container_name: kibana-single environment: - ELASTICSEARCH_HOSTS=http://elasticsearch:9200 - ELASTICSEARCH_SERVICEACCOUNT_TOKEN=AAEAAWVsYXN0aWMva2liYW5hL2tpYmFuYTpFM3cwM19aSVNueUV6MFFrUWV1emJR ports: - "5601:5601" depends_on: - elasticsearch networks: - es-net volumes: esdata-single: networks: es-net: driver: bridge
访问URL: http://localhost:5601 (本示例已设置 xpack.security.enabled=false 关闭安全认证,无需登录;若开启安全认证,则使用 elastic / 123456)
集群 + Dashboard
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 version: '3.8' services: es01: image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0 container_name: es01 environment: - node.name=es01 - cluster.name=es-cluster - discovery.seed_hosts=es02,es03 - cluster.initial_master_nodes=es01,es02,es03 - xpack.security.enabled=true - xpack.security.http.ssl.enabled=false - xpack.security.transport.ssl.enabled=false - ELASTIC_PASSWORD=123456 ulimits: memlock: soft: -1 hard: -1 volumes: - esdata01:/usr/share/elasticsearch/data ports: - "9200:9200" networks: - es-net es02: image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0 container_name: es02 environment: - node.name=es02 - cluster.name=es-cluster - discovery.seed_hosts=es01,es03 - cluster.initial_master_nodes=es01,es02,es03 - xpack.security.enabled=true - xpack.security.http.ssl.enabled=false - xpack.security.transport.ssl.enabled=false - ELASTIC_PASSWORD=123456 ulimits: memlock: soft: -1 hard: -1 volumes: - esdata02:/usr/share/elasticsearch/data ports: - "9201:9200" networks: - es-net es03: image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0 container_name: es03 environment: - node.name=es03 - cluster.name=es-cluster - discovery.seed_hosts=es01,es02 - cluster.initial_master_nodes=es01,es02,es03 - xpack.security.enabled=true - xpack.security.http.ssl.enabled=false - xpack.security.transport.ssl.enabled=false - ELASTIC_PASSWORD=123456 ulimits: memlock: soft: -1 hard: -1 volumes: - esdata03:/usr/share/elasticsearch/data ports: - "9202:9200" networks: - es-net kibana: image: docker.elastic.co/kibana/kibana:8.15.0 container_name: kibana-cluster environment: - ELASTICSEARCH_HOSTS=["http://es01:9200","http://es02:9200","http://es03:9200"] - 
ELASTICSEARCH_SERVICEACCOUNT_TOKEN=AAEAAWVsYXN0aWMva2liYW5hL2tpYmFuYT..... ports: - "5601:5601" depends_on: - es01 - es02 - es03 networks: - es-net volumes: esdata01: esdata02: esdata03: networks: es-net: driver: bridge
OpenSearch
单机 + Dashboard
需要修改密码:OPENSEARCH_PASSWORD 的值
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 version: '3.8' services: opensearch: image: opensearchproject/opensearch:latest container_name: opensearch environment: - cluster.name=opensearch-single-node - node.name=opensearch-single-node - discovery.type=single-node - bootstrap.memory_lock=true - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" - plugins.security.disabled=true - OPENSEARCH_INITIAL_ADMIN_PASSWORD=XyrIhMlycy1L0#@2 - plugins.security.ssl.http.enabled=false - plugins.security.ssl.transport.enabled=false - plugins.security.allow_unsafe_democertificates=true - plugins.security.allow_default_init_securityindex=true ulimits: memlock: soft: -1 hard: -1 nofile: soft: 65536 hard: 65536 volumes: - opensearch-data:/usr/share/opensearch/data ports: - 9200 :9200 - 9600 :9600 networks: - opensearch-net healthcheck: test: ["CMD-SHELL" , "curl -f -u admin:StrongAdminPass123! http://localhost:9200/_cluster/health || exit 1" ] interval: 30s timeout: 10s retries: 10 start_period: 60s opensearch-dashboards: image: opensearchproject/opensearch-dashboards:latest container_name: opensearch-dashboards environment: - OPENSEARCH_HOSTS=["http://192.168.3.158:9200"] - DISABLE_SECURITY_DASHBOARDS_PLUGIN=true - OPENSEARCH_USERNAME=admin - OPENSEARCH_PASSWORD=XyrIhMlycy1L0#@2 - OPENSEARCH_REQUEST_HEADERS_ALLOWLIST=authorization,content-type - SERVER_HOST=0.0.0.0 - SERVER_PORT=5601 - SERVER_SSL_ENABLED=false - OPENSEARCH_SSL_VERIFICATIONMODE=none ports: - 5601 :5601 depends_on: opensearch: condition: service_healthy networks: - opensearch-net volumes: opensearch-data: networks: opensearch-net:
集群 + Dashboard
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 version: '3.8' services: opensearch-node1: image: opensearchproject/opensearch:latest container_name: opensearch-node1 env_file: - .env environment: - cluster.name=opensearch-cluster - node.name=opensearch-node1 - discovery.seed_hosts=opensearch-node1,opensearch-node2 - cluster.initial_master_nodes=opensearch-node1,opensearch-node2 - bootstrap.memory_lock=true - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD} ulimits: memlock: soft: -1 hard: -1 nofile: soft: 65536 hard: 65536 volumes: - opensearch-data1:/usr/share/opensearch/data ports: - 9200 :9200 - 9600 :9600 networks: - opensearch-net opensearch-node2: image: opensearchproject/opensearch:latest container_name: opensearch-node2 environment: - cluster.name=opensearch-cluster - node.name=opensearch-node2 - discovery.seed_hosts=opensearch-node1,opensearch-node2 - cluster.initial_master_nodes=opensearch-node1,opensearch-node2 - bootstrap.memory_lock=true - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" - plugins.security.disabled=true ulimits: memlock: soft: -1 hard: -1 nofile: soft: 65536 hard: 65536 volumes: - opensearch-data2:/usr/share/opensearch/data networks: - opensearch-net opensearch-dashboards: image: opensearchproject/opensearch-dashboards:latest container_name: opensearch-dashboards ports: - 5601 :5601 environment: - 'OPENSEARCH_HOSTS=["http://opensearch-node1:9200","http://opensearch-node2:9200"]' - OPENSEARCH_USERNAME=${OPENSEARCH_USERNAME} - OPENSEARCH_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD} networks: - opensearch-net volumes: opensearch-data1: opensearch-data2: networks: opensearch-net: driver: bridge
Neo4j 是一个原生图数据库 ,这意味着它从存储层面上实现了真正的图模型。数据在 Neo4j 中的存储方式与您在白板上构思的方式相同,而不是在其他技术之上使用“图抽象”。
Neo4j 图数据库将数据存储为节点、关系和属性,而不是以表格或文档形式。
访问:http://localhost:7474
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 docker ps | grep neo4j docker logs neo4j docker logs -f neo4j docker exec -it neo4j bash docker exec -it neo4j cypher-shell -u neo4j -p your_password docker exec neo4j neo4j-admin database dump neo4j --to-path=/backups docker exec neo4j neo4j-admin database load neo4j --from-path=/backups docker run --rm -v /docker/neo4j/data:/data neo4j:5.15 neo4j-admin dbms set-initial-password 123456
单机(Standalone)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 version: '3.8' services: neo4j: image: neo4j:5.20-community container_name: neo4j-standalone environment: NEO4J_AUTH: neo4j/123@Admin NEO4J_ACCEPT_LICENSE_AGREEMENT: yes ports: - "7474:7474" - "7687:7687" volumes: - ./data/neo4j/data:/data - ./data/neo4j/logs:/logs - ./data/neo4j/plugins:/plugins restart: unless-stopped volumes: neo4j_data: neo4j_logs:
使用 Enterprise Edition (即使单机也可用,但需接受许可协议)。
若用 Community Edition ,仅支持单机,且不能用于生产集群。
初始用户名:neo4j,密码为 NEO4J_AUTH 中设置的值(本例为 123@Admin);若未设置 NEO4J_AUTH,默认密码为 neo4j,首次登录会要求修改。
配置
apoc.conf 或 neo4j.conf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 dbms.default_database =neo4j server.memory.heap.initial_size =2G server.memory.heap.max_size =2G server.memory.pagecache.size =1G dbms.connector.http.listen_address =:7474 dbms.connector.bolt.listen_address =:7687 dbms.security.procedures.unrestricted =apoc.*,gds.* apoc.export.file.enabled =true apoc.import.file.enabled =true apoc.import.file.use_neo4j_config =true dbms.default_listen_address =0.0.0.0 server.default_listen_address =0.0.0.0
集群版
3 个 Core 节点 (负责写入与数据持久化,必须奇数个以实现 Raft 共识)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 version: '3.8' services: neo4j-core-1: image: neo4j:5.20-enterprise container_name: neo4j-core-1 environment: NEO4J_ACCEPT_LICENSE_AGREEMENT: yes NEO4J_AUTH: neo4j/123@Admin NEO4J_initial_mode_constraint: "true" NEO4J_dbms_mode: CORE NEO4J_causal_clustering_minimum_core_cluster_size_at_formation: 3 NEO4J_causal_clustering_minimum_core_cluster_size_at_runtime: 3 NEO4J_causal_clustering_initial_discovery_members: neo4j-core-1:5000,neo4j-core-2:5000,neo4j-core-3:5000 NEO4J_causal_clustering_discovery_advertised_address: neo4j-core-1:5000 NEO4J_causal_clustering_transaction_advertised_address: neo4j-core-1:6000 NEO4J_causal_clustering_raft_advertised_address: neo4j-core-1:7000 NEO4J_dbms_connector_bolt_advertised_address: neo4j-core-1:7687 ports: - "7474:7474" - "7687:7687" volumes: - ./data/neo4j-01/data:/neo4j/data - ./data/neo4j-01/logs:/neo4j/logs networks: - neo4j-cluster-net neo4j-core-2: image: neo4j:5.20-enterprise container_name: neo4j-core-2 environment: NEO4J_ACCEPT_LICENSE_AGREEMENT: yes NEO4J_AUTH: neo4j/123@Admin NEO4J_initial_mode_constraint: "true" NEO4J_dbms_mode: CORE NEO4J_causal_clustering_minimum_core_cluster_size_at_formation: 3 NEO4J_causal_clustering_minimum_core_cluster_size_at_runtime: 3 NEO4J_causal_clustering_initial_discovery_members: neo4j-core-1:5000,neo4j-core-2:5000,neo4j-core-3:5000 NEO4J_causal_clustering_discovery_advertised_address: neo4j-core-2:5000 NEO4J_causal_clustering_transaction_advertised_address: neo4j-core-2:6000 NEO4J_causal_clustering_raft_advertised_address: neo4j-core-2:7000 NEO4J_dbms_connector_bolt_advertised_address: neo4j-core-2:7687 ports: - "7475:7474" - "7688:7687" volumes: - ./data/neo4j-02/data:/neo4j/data - ./data/neo4j-02/logs:/neo4j/logs networks: - 
neo4j-cluster-net neo4j-core-3: image: neo4j:5.20-enterprise container_name: neo4j-core-3 environment: NEO4J_ACCEPT_LICENSE_AGREEMENT: yes NEO4J_AUTH: neo4j/123@Admin NEO4J_initial_mode_constraint: "true" NEO4J_dbms_mode: CORE NEO4J_causal_clustering_minimum_core_cluster_size_at_formation: 3 NEO4J_causal_clustering_minimum_core_cluster_size_at_runtime: 3 NEO4J_causal_clustering_initial_discovery_members: neo4j-core-1:5000,neo4j-core-2:5000,neo4j-core-3:5000 NEO4J_causal_clustering_discovery_advertised_address: neo4j-core-3:5000 NEO4J_causal_clustering_transaction_advertised_address: neo4j-core-3:6000 NEO4J_causal_clustering_raft_advertised_address: neo4j-core-3:7000 NEO4J_dbms_connector_bolt_advertised_address: neo4j-core-3:7687 ports: - "7476:7474" - "7689:7687" volumes: - ./data/neo4j-03/data:/neo4j/data - ./data/neo4j-03/logs:/neo4j/logs networks: - neo4j-cluster-net networks: neo4j-cluster-net: driver: bridge volumes: core1_data: core1_logs: core2_data: core2_logs: core3_data: core3_logs: