# devkit/docker-compose.yml
name: ${SERVICE_GROUP_NAME}
services:
# Main web entry point; nginx proxies all web traffic
nginx:
image: quay.io/wandoubaba517/nginx:1.27
container_name: devkit_nginx
restart: always
volumes:
- ./nginx/conf/nginx.conf:/etc/nginx/nginx.conf
- ./nginx/conf/conf.d:/etc/nginx/conf.d
- ./nginx/conf/certs:/etc/nginx/certs
- ./nginx/conf/fastcgi_params:/etc/nginx/fastcgi_params
- ./nginx/conf/mime.types:/etc/nginx/mime.types
- ./nginx/conf/scgi_params:/etc/nginx/scgi_params
- ./nginx/conf/uwsgi_params:/etc/nginx/uwsgi_params
- ./nginx/html:/usr/share/nginx/html
working_dir: /usr/share/nginx/html
stdin_open: true
networks:
- ${NETWORK_NAME}
ports:
- 44100:80
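# From the host the proxy is reachable at http://localhost:44100; other
# containers on ${NETWORK_NAME} reach it as http://nginx (port 80). The routes
# to the admin UIs below are assumed to be defined in ./nginx/conf/conf.d/*.conf.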
# Web API service providing HTTP and WebSocket endpoints, proxied by nginx
# api:
# image: quay.io/wandoubaba517/workerman:8.1
# container_name: devkit_api
# restart: always
# volumes:
# - ./api/:/app/service
# - ./api/php.ini:/usr/local/etc/php/php.ini
# - /var/run/docker.sock:/var/run/docker.sock
# working_dir: /app/service
# stdin_open: true
# networks:
# - devkit
# ports:
# - 55601:55601
# - 55621:55621
# - 55631:55631
# command: ['php', 'start.php', 'start']
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:55601/"]
# interval: 5s
# retries: 3
# start_period: 5s
# timeout: 10s
# depends_on:
# es:
# condition: service_healthy
# restart: true
# postgres:
# condition: service_healthy
# restart: true
# mongo:
# condition: service_healthy
# restart: true
# redis:
# condition: service_healthy
# restart: true
# minio:
# condition: service_healthy
# restart: true
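# To enable the api service, uncomment the block above together with the es,
# postgres, mongo, redis and minio services it depends on; ports
# 55601/55621/55631 are then published on the host in addition to being proxied by nginx.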
# Container management web UI (Portainer), proxied by nginx
portainer:
image: quay.io/wandoubaba517/portainer-ce:2.20.3
container_name: devkit_portainer
networks:
- ${NETWORK_NAME}
volumes:
- ./portainer/data:/data
- /var/run/docker.sock:/var/run/docker.sock
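# Portainer serves its web UI on port 9000 (HTTP) inside the container by
# default; no host port is published here, so it is only reachable through the
# nginx proxy.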
# PostgreSQL admin web UI (pgAdmin), proxied by nginx
pgadmin:
image: quay.io/wandoubaba517/pgadmin4:8.9
container_name: devkit_pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: hello@pg.com
PGADMIN_DEFAULT_PASSWORD: helloworld
volumes:
- ./pgadmin/servers.json:/pgadmin4/servers.json
restart: always
networks:
- ${NETWORK_NAME}
depends_on:
postgres:
condition: service_healthy
restart: true
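# Log in with the credentials defined above (hello@pg.com / helloworld); the
# preloaded server list comes from ./pgadmin/servers.json. pgAdmin listens on
# port 80 inside the container and is reached through the nginx proxy.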
# PostgreSQL database
postgres:
image: quay.io/wandoubaba517/postgres:16.3
container_name: devkit_postgres
restart: always
environment:
POSTGRES_PASSWORD: helloworld
POSTGRES_USER: hello
POSTGRES_DB: hello
PGDATA: /data/pgdata
volumes:
- ./postgres/data:/data
- ./postgres/conf:/etc/postgres
networks:
- ${NETWORK_NAME}
command:
- postgres
- -c
- 'config_file=/etc/postgres/postgresql.conf'
- -c
- 'hba_file=/etc/postgres/pg_hba.conf'
- -c
- 'ident_file=/etc/postgres/pg_ident.conf'
healthcheck:
test: ["CMD-SHELL", "pg_isready -U hello"]
interval: 15s
retries: 5
start_period: 15s
timeout: 10s
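# Example connection string from another container on the same network, using
# the credentials above and assuming the default port 5432 is kept in
# ./postgres/conf/postgresql.conf:
#   postgres://hello:helloworld@postgres:5432/hello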
# MongoDB admin web UI (mongo-express), proxied by nginx; protected by the basic-auth credentials below
mongo-express:
image: quay.io/wandoubaba517/mongo-express:1.0.2
container_name: devkit_mongo-express
restart: always
networks:
- ${NETWORK_NAME}
environment:
ME_CONFIG_MONGODB_ADMINUSERNAME: hello
ME_CONFIG_MONGODB_ADMINPASSWORD: helloworld
ME_CONFIG_MONGODB_URL: mongodb://hello:helloworld@mongo:27017/
ME_CONFIG_BASICAUTH_USERNAME: hello
ME_CONFIG_BASICAUTH_PASSWORD: helloworld
ME_CONFIG_BASICAUTH: "true"
depends_on:
mongo:
condition: service_healthy
restart: true
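# mongo-express listens on port 8081 inside the container by default and is
# reached through the nginx proxy; the basic-auth login is hello/helloworld.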
# MongoDB database
mongo:
image: quay.io/wandoubaba517/mongo:7.0
container_name: devkit_mongo
restart: always
environment:
TZ: Asia/Shanghai
MONGO_INITDB_ROOT_USERNAME: hello
MONGO_INITDB_ROOT_PASSWORD: helloworld
MONGO_INITDB_DATABASE: log
volumes:
- ./mongo/data/db:/data/db
- ./mongo/data/configdb:/data/configdb
- ./mongo/conf:/etc/mongo
networks:
- ${NETWORK_NAME}
command: ['mongod', '--config', '/etc/mongo/mongod.conf']
healthcheck:
test: ["CMD-SHELL", "mongosh -u hello -p helloworld --eval \"db.stats().ok\""]
interval: 20s
retries: 5
start_period: 15s
timeout: 10s
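# Example connection string from another container (the same URL mongo-express
# uses above), assuming the default port 27017 is kept in ./mongo/conf/mongod.conf:
#   mongodb://hello:helloworld@mongo:27017/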
# Redis cache service
redis:
image: quay.io/wandoubaba517/redis:7.2
container_name: devkit_redis
restart: always
volumes:
- ./redis/data:/data
- ./redis/conf:/etc/redis
networks:
- ${NETWORK_NAME}
command: ['redis-server', '/etc/redis/redis.conf']
healthcheck:
test: ["CMD", "redis-cli", "-a", "helloworld", "ping"]
interval: 15s
retries: 3
start_period: 15s
timeout: 10s
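# Example connection from another container, assuming the default port 6379 and
# the password used by the healthcheck are set in ./redis/conf/redis.conf:
#   redis://:helloworld@redis:6379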
# MinIO object storage service
minio:
image: quay.io/minio/minio:RELEASE.2024-07-16T23-46-41Z
container_name: devkit_minio
restart: always
environment:
MINIO_ROOT_USER: hello
MINIO_ROOT_PASSWORD: helloworld
MINIO_VOLUMES: /mnt/data
networks:
- ${NETWORK_NAME}
volumes:
- ./minio/data:/mnt/data
- ./minio/config:/root/.minio/
command: server --console-address ':9001'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 15s
retries: 3
start_period: 60s
timeout: 10s
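# The S3 API listens on port 9000 (used by the healthcheck) and the web console
# on 9001 (--console-address). From another container, point an S3 client at
# http://minio:9000 with access key "hello" and secret key "helloworld".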
# RabbitMQ message queue service
rabbitmq:
image: quay.io/wandoubaba517/rabbitmq:3.13.4
container_name: devkit_rabbitmq
restart: always
networks:
- ${NETWORK_NAME}
ports:
- 15672:15672
volumes:
- ./rabbitmq/data:/var/lib/rabbitmq/mnesia
- ./rabbitmq/conf/conf.d:/etc/rabbitmq/conf.d
healthcheck:
test: ["CMD", "rabbitmq-diagnostics", "check_running"]
interval: 30s
timeout: 10s
retries: 5
start_period: 5s
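# The management UI is published on host port 15672; AMQP clients on the same
# network connect to amqp://rabbitmq:5672, assuming the default listener port is
# kept. Broker credentials are expected to be defined in ./rabbitmq/conf/conf.d.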
# es-setup, es and kibana together form the Elasticsearch stack
# es-setup:
# image: quay.io/wandoubaba517/elasticsearch:${ES_STACK_VERSION}
# volumes:
# - ./elasticsearch/certs:/usr/share/elasticsearch/config/certs
# user: "0"
# networks:
# - ${NETWORK_NAME}
# command: >
# bash -c '
# if [ x${ELASTIC_PASSWORD} == x ]; then
# echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
# exit 1;
# elif [ x${KIBANA_PASSWORD} == x ]; then
# echo "Set the KIBANA_PASSWORD environment variable in the .env file";
# exit 1;
# fi;
# if [ ! -f config/certs/ca.zip ]; then
# echo "Creating CA";
# bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
# unzip config/certs/ca.zip -d config/certs;
# fi;
# if [ ! -f config/certs/certs.zip ]; then
# echo "Creating certs";
# echo -ne \
# "instances:\n"\
# " - name: es\n"\
# " dns:\n"\
# " - es\n"\
# " - localhost\n"\
# " ip:\n"\
# " - 127.0.0.1\n"\
# > config/certs/instances.yml;
# bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
# unzip config/certs/certs.zip -d config/certs;
# fi;
# echo "Setting file permissions"
# chown -R root:root config/certs;
# find . -type d -exec chmod 750 \{\} \;;
# find . -type f -exec chmod 640 \{\} \;;
# echo "Waiting for Elasticsearch availability";
# until curl -s --cacert config/certs/ca/ca.crt https://es:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
# echo "Setting kibana_system password";
# until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
# echo "All done!";
# '
# healthcheck:
# test: ["CMD-SHELL", "[ -f config/certs/es/es.crt ]"]
# interval: 1s
# timeout: 5s
# retries: 120
# es:
# depends_on:
# es-setup:
# condition: service_healthy
# image: quay.io/wandoubaba517/elasticsearch:${ES_STACK_VERSION}
# container_name: devkit_es
# restart: always
# volumes:
# - ./elasticsearch/certs:/usr/share/elasticsearch/config/certs
# - ./elasticsearch/esdata:/usr/share/elasticsearch/data
# - ./elasticsearch/eslogs:/usr/share/elasticsearch/logs
# - ./elasticsearch/plugins/ik:/usr/share/elasticsearch/plugins/ik
# ports:
# - ${ES_PORT}:9200
# networks:
# - ${NETWORK_NAME}
# environment:
# - node.name=es
# - cluster.name=${ES_CLUSTER_NAME}
# - cluster.initial_master_nodes=es
# - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
# - bootstrap.memory_lock=true
# - xpack.security.enabled=true
# - xpack.security.http.ssl.enabled=true
# - xpack.security.http.ssl.key=certs/es/es.key
# - xpack.security.http.ssl.certificate=certs/es/es.crt
# - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
# - xpack.security.transport.ssl.enabled=true
# - xpack.security.transport.ssl.key=certs/es/es.key
# - xpack.security.transport.ssl.certificate=certs/es/es.crt
# - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
# - xpack.security.transport.ssl.verification_mode=certificate
# - xpack.license.self_generated.type=${ES_LICENSE}
# mem_limit: ${ES_MEM_LIMIT}
# ulimits:
# memlock:
# soft: -1
# hard: -1
# healthcheck:
# test:
# [
# "CMD-SHELL",
# "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
# ]
# interval: 120s
# timeout: 30s
# retries: 120
# start_period: 120s
# kibana:
# depends_on:
# es:
# condition: service_healthy
# image: quay.io/wandoubaba517/kibana:${ES_STACK_VERSION}
# container_name: devkit_kibana
# restart: always
# volumes:
# - ./elasticsearch/certs:/usr/share/kibana/config/certs
# - ./elasticsearch/kibanadata:/usr/share/kibana/data
# ports:
# - ${KIBANA_PORT}:5601
# networks:
# - ${NETWORK_NAME}
# environment:
# - SERVERNAME=kibana
# - ELASTICSEARCH_HOSTS=https://es:9200
# - ELASTICSEARCH_USERNAME=kibana_system
# - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
# - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
# - SERVER_BASEPATH=/kibana
# mem_limit: ${ES_MEM_LIMIT}
# healthcheck:
# test:
# [
# "CMD-SHELL",
# "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
# ]
# interval: 10s
# timeout: 10s
# retries: 120
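# The es-setup, es and kibana services are disabled by default. Enabling them
# requires the variables they reference to be present in .env: ES_STACK_VERSION,
# ELASTIC_PASSWORD, KIBANA_PASSWORD, ES_CLUSTER_NAME, ES_LICENSE, ES_MEM_LIMIT,
# ES_PORT and KIBANA_PORT.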
networks:
devkit:
name: ${NETWORK_NAME}
driver: bridge
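# A minimal .env sketch (assumption -- adjust the values to your environment):
#   SERVICE_GROUP_NAME=devkit
#   NETWORK_NAME=devkit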