Redis
docker run \
-d \
-p 127.0.0.1:6379:6379 \
--name=redis \
redis
docker-compose.yml
services:
  redis:
    image: redis
    container_name: redis
    ports:
      - "127.0.0.1:6379:6379"
Nginx
mkdir -p ./html/
mkdir -p ./conf.d/
mkdir -p ./log/
cat > ./html/index.html << EOF
Hello World!
EOF
cat > ./conf.d/default.conf << EOF
server {
    listen 80;
    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
    }
}
EOF
docker run \
-d \
-p 127.0.0.1:8014:80 \
--name=nginx \
-v ./html/:/usr/share/nginx/html/ \
-v ./conf.d/:/etc/nginx/conf.d/ \
-v ./log/:/var/log/nginx/ \
nginx:alpine
docker-compose.yml
services:
  nginx:
    image: nginx:alpine
    container_name: nginx
    ports:
      - "127.0.0.1:8014:80"
    volumes:
      - ./html:/usr/share/nginx/html/
      - ./conf.d:/etc/nginx/conf.d/
      - ./log:/var/log/nginx/
html/index.html
Hello World!
conf.d/default.conf
server {
    listen 80;
    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
    }
}
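To verify the setup, request the test page from the host (assuming the port mapping above):
# Should print "Hello World!"
curl http://127.0.0.1:8014/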
MySQL
docker network create mysql
mkdir -p ./data/
docker run \
-d \
--name=mysql \
-p 127.0.0.1:3306:3306 \
--net mysql \
-e MYSQL_ROOT_PASSWORD=Passw0rd \
-e MYSQL_DATABASE=mydb \
-e MYSQL_USER=luanrz \
-e MYSQL_PASSWORD=Passw0rd \
-v ./data/:/var/lib/mysql/ \
mysql
docker run \
-d \
--name phpmyadmin \
-p 127.0.0.1:8016:80 \
--net mysql \
-e PMA_HOST=mysql \
-e PMA_USER=root \
-e PMA_PASSWORD=Passw0rd \
phpmyadmin
docker-compose.yml
services:
  mysql:
    image: mysql
    container_name: mysql
    restart: unless-stopped
    ports:
      - "127.0.0.1:3306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=Passw0rd
      - MYSQL_DATABASE=mydb
      - MYSQL_USER=luanrz
      - MYSQL_PASSWORD=Passw0rd
    volumes:
      - ./data/:/var/lib/mysql/
    networks:
      - mysql
  phpmyadmin:
    image: phpmyadmin
    container_name: phpmyadmin
    restart: unless-stopped
    ports:
      - "127.0.0.1:8016:80"
    environment:
      - PMA_HOST=mysql
      - PMA_USER=root
      - PMA_PASSWORD=Passw0rd
    depends_on:
      - mysql
    networks:
      - mysql
networks:
  mysql:
    name: mysql
    driver: bridge
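To verify the setup, either open phpMyAdmin at http://127.0.0.1:8016 or run a query through the bundled client (a minimal check, assuming the credentials above):
# Should print a result row containing 1
docker exec -it mysql mysql -uluanrz -pPassw0rd mydb -e "SELECT 1;"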
XXL-JOB
mkdir -p ./log/
docker run \
-d \
--name xxl-job-admin \
-p 127.0.0.1:8024:8080 \
--net mysql \
-e PARAMS="--spring.datasource.url=jdbc:mysql://mysql:3306/xxl_job?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.password=Passw0rd" \
-v ./log/:/data/applogs/xxl-job/ \
xuxueli/xxl-job-admin:2.4.0
docker-compose.yml
services:
  xxl-job-admin:
    image: xuxueli/xxl-job-admin:2.4.0
    container_name: xxl-job-admin
    restart: unless-stopped
    ports:
      - "127.0.0.1:8024:8080"
    volumes:
      - ./log/:/data/applogs/xxl-job/
    environment:
      PARAMS: >-
        --spring.datasource.url=jdbc:mysql://mysql:3306/xxl_job?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai
        --spring.datasource.password=Passw0rd
    networks:
      - mysql
networks:
  mysql:
    name: mysql
    external: true
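Note that the admin console expects the xxl_job database to be initialized with the schema shipped in the xxl-job source tree (doc/db/tables_xxl_job.sql). A minimal sketch of loading it into the MySQL container from the previous section, assuming the schema file has been downloaded to the current directory:
# Import the XXL-JOB schema (the script creates the xxl_job database itself)
docker exec -i mysql mysql -uroot -pPassw0rd < tables_xxl_job.sql
The console should then be reachable at http://127.0.0.1:8024/xxl-job-admin (default login: admin/123456).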
ELK
See the official ElasticSearch, Kibana, LogStash, and Filebeat documentation, as well as the Kafka image repository.
# Initialize images and the network
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.10.2
docker pull docker.elastic.co/kibana/kibana:7.10.2
docker pull docker.elastic.co/logstash/logstash:7.10.2
docker pull apache/kafka:4.1.1
docker pull docker.elastic.co/beats/filebeat:7.10.2
docker network create elastic
# Initialize directories
mkdir -p elasticsearch/data logstash/config logstash/log filebeat/config filebeat/log
# Change directory ownership (the elastic containers run as uid:gid 1000:1000 by default)
chown -R 1000:1000 elasticsearch/data
ElasticSearch
(1) Start the ElasticSearch container
docker run \
-d \
--name elasticsearch01 \
-p 127.0.0.1:9200:9200 \
--net elastic \
-e discovery.type=single-node \
-e xpack.security.enabled=true \
-e "ELASTIC_PASSWORD=Passw0rd" \
-v ./elasticsearch/data/:/usr/share/elasticsearch/data/ \
-m 1GB \
docker.elastic.co/elasticsearch/elasticsearch:7.10.2
(2) Set the ES passwords
Optional. If you set the ES passwords with this tool, the ELASTIC_PASSWORD=Passw0rd setting above no longer takes effect. Running the command below requires xpack.security.enabled=true.
docker exec -it elasticsearch01 /usr/share/elasticsearch/bin/elasticsearch-setup-passwords auto
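To verify that ElasticSearch is reachable (assuming the ELASTIC_PASSWORD set at startup):
# Should return cluster metadata as JSON
curl -u elastic:Passw0rd http://127.0.0.1:9200/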
Kibana
(1) Start the Kibana container
docker run \
-d \
--name kibana01 \
-p 127.0.0.1:5601:5601 \
--net elastic \
-e ELASTICSEARCH_HOSTS=http://elasticsearch01:9200 \
-e ELASTICSEARCH_USERNAME=elastic \
-e ELASTICSEARCH_PASSWORD=Passw0rd \
-m 0.5GB \
docker.elastic.co/kibana/kibana:7.10.2
ELASTICSEARCH_PASSWORD is the password from the ElasticSearch setup in the previous step.
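To verify, query the status API from the host (Kibana may take a minute to become ready):
# Should report an overall state of "green" once ready
curl http://127.0.0.1:5601/api/status
The UI itself is then available at http://127.0.0.1:5601.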
LogStash
(1) Create the logstash.yml configuration file
cat > ./logstash/config/logstash.yml << EOF
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch01:9200" ]
xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "Passw0rd"
EOF
(2) Create the logstash.conf configuration file
cat > ./logstash/config/logstash.conf << EOF
input {
    file {
        path => "/var/log/logstash/*.log"
        start_position => "beginning"
        codec => multiline {
            pattern => "[#%&*^]"
            negate => false
            what => "next"
        }
    }
}
output {
    # Output to ES
    elasticsearch {
        hosts => ["http://elasticsearch01:9200"]
        index => "log-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "Passw0rd"
    }
    # Output to the console
    stdout {
        codec => rubydebug
    }
}
EOF
(3) Start the LogStash container
docker run \
-d \
--name logstash01 \
--net elastic \
-v ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml \
-v ./logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
-v ./logstash/log/:/var/log/logstash/ \
-m 1GB \
docker.elastic.co/logstash/logstash:7.10.2
(4) Verify LogStash
# Append a test line to the log file watched by LogStash
echo -e "[#%&*^]666" >> ./logstash/log/1.log
# Enter the LogStash container, then run the query below from inside the elastic network
docker exec -it logstash01 bash
curl -X GET "http://elasticsearch01:9200/_search/" \
-H 'Content-Type: application/json' \
-u 'elastic:Passw0rd' \
-d '{
  "query": {
    "match": {
      "message": "666"
    }
  }
}'
Kafka
(1) Start the Kafka container
docker run \
-d \
--name kafka01 \
-p 127.0.0.1:9094:9094 \
--net elastic \
-e KAFKA_PROCESS_ROLES=broker,controller \
-e KAFKA_NODE_ID=1 \
-e KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka01:9093 \
-e KAFKA_LISTENERS=INTERNAL://:9092,CONTROLLER://:9093,EXTERNAL://:9094 \
-e KAFKA_ADVERTISED_LISTENERS=INTERNAL://kafka01:9092,EXTERNAL://localhost:9094 \
-e KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL \
-e KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER \
-e KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT \
-e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
-m 1GB \
apache/kafka:4.1.1
To allow Kafka to be reached both from inside the container network and from the host, KAFKA_ADVERTISED_LISTENERS declares two listeners: INTERNAL and EXTERNAL. Note that once this parameter is set, the default configuration (/opt/kafka/config/server.properties) is no longer used, so all of the other -e parameters above must be supplied as well, or the container will fail to start.
In single-node mode, KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR must be set to 1, or the container will fail to start.
(2) Verify Kafka
# Enter the Kafka container
docker exec --workdir /opt/kafka/bin/ -it kafka01 bash
# Create a topic
./kafka-topics.sh --bootstrap-server localhost:9092 --create --topic elastic-topic
# Produce a test message
./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic elastic-topic <<< "[#%&*^]666"
# Consume messages from the beginning
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic elastic-topic --from-beginning
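The EXTERNAL listener published on 127.0.0.1:9094 can also be exercised from the host; a minimal sketch using kcat (a third-party CLI, assuming it is installed on the host):
# Produce a message through the EXTERNAL listener
echo "[#%&*^]666" | kcat -b localhost:9094 -t elastic-topic -P
# Consume it back from the beginning and exit at end of topic
kcat -b localhost:9094 -t elastic-topic -C -o beginning -e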
(3) Integrate with LogStash: push Kafka data to LogStash
Modify the input section of the logstash.conf configuration file (see LogStash):
input {
    kafka {
        bootstrap_servers => "kafka01:9092"
        topics => ["elastic-topic"]
        group_id => "logstash-consumer-group"
        decorate_events => true
        codec => "json"
    }
}
The full configuration is as follows:
cat > ./logstash/config/logstash.conf << EOF
input {
    kafka {
        bootstrap_servers => "kafka01:9092"
        topics => ["elastic-topic"]
        group_id => "logstash-consumer-group"
        decorate_events => true
        codec => "json"
    }
}
output {
    # Output to ES
    elasticsearch {
        hosts => ["http://elasticsearch01:9200"]
        index => "log-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "Passw0rd"
    }
    # Output to the console
    stdout {
        codec => rubydebug
    }
}
EOF
Restart LogStash, produce log entries (in JSON format) with the Kafka producer, and verify in Kibana that the logs were indexed successfully.
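For example, reusing the console producer from the verification step above (the json codec expects one JSON object per message):
# Produce a JSON log entry into the topic consumed by LogStash
docker exec -i --workdir /opt/kafka/bin/ kafka01 \
./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic elastic-topic <<< '{"message": "666"}'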
Filebeat
(1) Create the filebeat.yml configuration file
cat > ./filebeat/config/filebeat.yml << EOF
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/filebeat/*.log
output.elasticsearch:
  hosts: ['elasticsearch01:9200']
  username: 'elastic'
  password: 'Passw0rd'
EOF
(2) Start the Filebeat container
docker run \
-d \
--name filebeat01 \
--net elastic \
-v ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro \
-v ./filebeat/log/:/var/log/filebeat/:ro \
-m 0.1GB \
docker.elastic.co/beats/filebeat:7.10.2
(3) Verify Filebeat
# Append a test line to the log file watched by Filebeat
echo -e "[#%&*^]666" >> ./filebeat/log/1.log
# Enter the Filebeat container, then run the query below from inside the elastic network
docker exec -it filebeat01 bash
curl -X GET "http://elasticsearch01:9200/_search/" \
-H 'Content-Type: application/json' \
-u 'elastic:Passw0rd' \
-d '{
  "query": {
    "match": {
      "message": "666"
    }
  }
}'
(4) Integrate with Kafka: push Filebeat data to Kafka
Modify the output section of the filebeat.yml configuration file:
output.kafka:
  hosts: ['kafka01:9092']
  topic: "elastic-topic"
Additionally, to handle log entries that contain line breaks, add a multiline configuration to the input:
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/filebeat/*.log
    multiline:
      pattern: '[#%&*^]'
      negate: true
      match: after
The full configuration is as follows:
cat > ./filebeat/config/filebeat.yml << EOF
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/filebeat/*.log
    multiline:
      pattern: '[#%&*^]'
      negate: true
      match: after
output.kafka:
  hosts: ['kafka01:9092']
  topic: "elastic-topic"
EOF
Restart Filebeat, write some log entries under filebeat/log/, and verify in Kafka that the logs arrived successfully.
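For example, append a test line and watch it arrive on the topic (reusing the console consumer from the Kafka section):
echo "[#%&*^]666" >> ./filebeat/log/1.log
docker exec --workdir /opt/kafka/bin/ -it kafka01 \
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic elastic-topic --from-beginning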
Finally, start the corresponding ELK containers to verify the common ELK pipelines:
- LogStash -> ElasticSearch -> Kibana
- Filebeat -> ElasticSearch -> Kibana
- Filebeat -> Kafka -> LogStash -> ElasticSearch -> Kibana
ELK Compose
docker-compose.yml
services:
  elasticsearch01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.2
    container_name: elasticsearch01
    ports:
      - "127.0.0.1:9200:9200"
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=true
      - ELASTIC_PASSWORD=Passw0rd
    volumes:
      - ./elasticsearch/data/:/usr/share/elasticsearch/data/
    deploy:
      resources:
        limits:
          memory: 1GB
    networks:
      - elastic
  kibana01:
    image: docker.elastic.co/kibana/kibana:7.10.2
    container_name: kibana01
    ports:
      - "127.0.0.1:5601:5601"
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch01:9200
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=Passw0rd
    depends_on:
      - elasticsearch01
    deploy:
      resources:
        limits:
          memory: 0.5GB
    networks:
      - elastic
  logstash01:
    image: docker.elastic.co/logstash/logstash:7.10.2
    container_name: logstash01
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - ./logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - ./logstash/log/:/var/log/logstash/
    depends_on:
      - elasticsearch01
      - kafka01
    deploy:
      resources:
        limits:
          memory: 1GB
    networks:
      - elastic
  kafka01:
    image: apache/kafka:4.1.1
    container_name: kafka01
    ports:
      - "127.0.0.1:9094:9094"
    environment:
      - KAFKA_PROCESS_ROLES=broker,controller
      - KAFKA_NODE_ID=1
      - KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka01:9093
      - KAFKA_LISTENERS=INTERNAL://:9092,CONTROLLER://:9093,EXTERNAL://:9094
      - KAFKA_ADVERTISED_LISTENERS=INTERNAL://kafka01:9092,EXTERNAL://localhost:9094
      - KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL
      - KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
    depends_on:
      - elasticsearch01
    deploy:
      resources:
        limits:
          memory: 1GB
    networks:
      - elastic
  filebeat01:
    image: docker.elastic.co/beats/filebeat:7.10.2
    container_name: filebeat01
    volumes:
      - ./filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - ./filebeat/log/:/var/log/filebeat/:ro
    depends_on:
      - kafka01
    deploy:
      resources:
        limits:
          memory: 0.1GB
    networks:
      - elastic
networks:
  elastic:
    name: elastic
    driver: bridge
filebeat/config/filebeat.yml
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/filebeat/*.log
    multiline:
      pattern: '[#%&*^]'
      negate: true
      match: after
output.kafka:
  hosts: ['kafka01:9092']
  topic: "elastic-topic"
logstash/config/logstash.yml
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch01:9200" ]
xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "Passw0rd"
logstash/config/logstash.conf
input {
    kafka {
        bootstrap_servers => "kafka01:9092"
        topics => ["elastic-topic"]
        group_id => "logstash-consumer-group"
        decorate_events => true
        codec => "json"
    }
}
output {
    # Output to ES
    elasticsearch {
        hosts => ["http://elasticsearch01:9200"]
        index => "log-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "Passw0rd"
    }
    # Output to the console
    stdout {
        codec => rubydebug
    }
}
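With these files in place, the whole stack can be brought up with a single command (the ES data directory still needs the ownership fix from the initialization step):
chown -R 1000:1000 elasticsearch/data
docker compose up -d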
Windows
See the official GitHub page and this article.
Download a Windows image file (e.g. Windows 10 LTSC), rename it to boot.iso, and place it in the current directory.
docker run \
-it \
--name windows \
-p 127.0.0.1:8006:8006 \
-p 127.0.0.1:3389:3389/tcp \
-p 127.0.0.1:3389:3389/udp \
-e VERSION="./boot.iso" \
-e RAM_SIZE="2G" \
-e CPU_CORES="2" \
-e DISK_SIZE="32G" \
-e USERNAME="luanrz" \
-e PASSWORD="Passw0rd" \
-v ./boot.iso:/boot.iso \
-v ./storage/:/storage/ \
-v ./data/:/data/ \
--device=/dev/kvm \
--device=/dev/net/tun \
--cap-add NET_ADMIN \
--stop-timeout 120 \
dockurr/windows
Open http://127.0.0.1:8006 in a browser and walk through the Windows installation. After installation, enable Remote Desktop access; you can then connect to port 3389 with Remmina or any other RDP-capable client.
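For example, with FreeRDP installed on the host (assuming the credentials configured above):
# Connect to the Windows guest over RDP
xfreerdp /v:127.0.0.1:3389 /u:luanrz /p:Passw0rd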
docker-compose.yml
services:
  windows:
    image: dockurr/windows
    container_name: windows
    environment:
      VERSION: "./boot.iso"
      RAM_SIZE: "2G"
      CPU_CORES: "2"
      DISK_SIZE: "32G"
      USERNAME: "luanrz"
      PASSWORD: "Passw0rd"
    ports:
      - 127.0.0.1:8006:8006
      - 127.0.0.1:3389:3389/tcp
      - 127.0.0.1:3389:3389/udp
    volumes:
      - ./boot.iso:/boot.iso
      - ./storage/:/storage/
      - ./data/:/data/
    devices:
      - /dev/kvm
      - /dev/net/tun
    cap_add:
      - NET_ADMIN
    restart: on-failure
    stop_grace_period: 2m