
2.2.2.RELEASE

test
smallchill · 5 years ago
commit 56f0dddc75
20 changed files (lines changed · path):
  1.   0  script/docker/app/.env
  2.  15  script/docker/app/deploy.sh
  3.  64  script/docker/app/docker-compose.yml
  4.   0  script/docker/app/nacos/init.d/custom.properties
  5.   4  script/docker/app/nginx/api/nginx.conf
  6.   0  script/docker/app/nginx/web/html/index.html
  7.   0  script/docker/app/nginx/web/nginx.conf
  8.  27  script/docker/elk/README.md
  9.  88  script/docker/elk/deploy.sh
 10. 115  script/docker/elk/docker-compose.yml
 11.  28  script/docker/elk/es-master.yml
 12.  28  script/docker/elk/es-slave1.yml
 13.  28  script/docker/elk/es-slave2.yml
 14.  37  script/docker/elk/filebeat.yml
 15.   8  script/docker/elk/kibana.yml
 16.  23  script/docker/elk/logstash-filebeat.conf
 17.   8  script/docker/elk/logstash.yml
 18.  16  script/docker/elk/undeploy.sh
 19.   0  script/fatjar/service.cmd
 20.   0  script/fatjar/service.sh

0  script/docker/.env → script/docker/app/.env

15  script/docker/deploy.sh → script/docker/app/deploy.sh

@@ -14,6 +14,9 @@ port(){
firewall-cmd --add-port=3306/tcp --permanent
firewall-cmd --add-port=3379/tcp --permanent
firewall-cmd --add-port=7002/tcp --permanent
firewall-cmd --add-port=7003/tcp --permanent
firewall-cmd --add-port=9411/tcp --permanent
firewall-cmd --add-port=9999/tcp --permanent
service firewalld restart
}
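Since the rules are added with --permanent, they only apply once firewalld is restarted at the end of port(); the result can be checked with standard firewalld tooling (a quick check, not part of this commit):

firewall-cmd --list-ports   # should now include 7003/tcp and 9411/tcp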
@@ -39,14 +42,14 @@ base(){
docker-compose up -d nacos sentinel web-nginx blade-nginx blade-redis
}
# Start application modules
modules(){
docker-compose up -d blade-gateway1 blade-gateway2 blade-auth1 blade-auth2 blade-user blade-desk blade-system blade-log
}
# Start monitoring modules
monitor(){
docker-compose up -d blade-admin
docker-compose up -d blade-admin blade-turbine blade-zipkin
}
# Start application modules
modules(){
docker-compose up -d blade-gateway1 blade-gateway2 blade-auth1 blade-auth2 blade-user blade-desk blade-system blade-log blade-flow blade-flow-design blade-resource
}
# Stop all modules

64  script/docker/docker-compose.yml → script/docker/app/docker-compose.yml

@@ -1,10 +1,11 @@
version: '3'
services:
nacos:
image: nacos/nacos-server:1.0.0
image: nacos/nacos-server:1.1.3
hostname: "nacos-standalone"
environment:
- MODE=standalone
- TZ=Asia/Shanghai
volumes:
- /docker/nacos/standalone-logs/:/home/nacos/logs
- /docker/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
@@ -17,6 +18,8 @@ services:
sentinel:
image: bladex/sentinel-dashboard:1.6.0
hostname: "sentinel"
environment:
- TZ=Asia/Shanghai
ports:
- 8858:8858
restart: on-failure
@@ -27,6 +30,8 @@ services:
blade-nginx:
image: nginx:stable-alpine-perl
hostname: "blade-nginx"
environment:
- TZ=Asia/Shanghai
ports:
- 88:88
volumes:
@@ -39,6 +44,8 @@ services:
web-nginx:
image: nginx:stable-alpine-perl
hostname: "web-nginx"
environment:
- TZ=Asia/Shanghai
ports:
- 8000:8000
volumes:
@@ -52,6 +59,8 @@ services:
blade-redis:
image: redis:5.0.2-alpine
hostname: "blade-redis"
environment:
- TZ=Asia/Shanghai
ports:
- 3379:6379
volumes:
@@ -64,6 +73,8 @@ services:
blade-admin:
image: "${REGISTER}/blade/blade-admin:${TAG}"
environment:
- TZ=Asia/Shanghai
ports:
- 7002:7002
privileged: true
@@ -71,8 +82,32 @@ services:
networks:
- blade_net
blade-turbine:
image: "${REGISTER}/blade/blade-turbine:${TAG}"
environment:
- TZ=Asia/Shanghai
ports:
- 7003:7003
privileged: true
restart: always
networks:
- blade_net
blade-zipkin:
image: "${REGISTER}/blade/blade-zipkin:${TAG}"
environment:
- TZ=Asia/Shanghai
ports:
- 9411:9411
privileged: true
restart: always
networks:
- blade_net
blade-gateway1:
image: "${REGISTER}/blade/blade-gateway:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -81,6 +116,8 @@ services:
blade-gateway2:
image: "${REGISTER}/blade/blade-gateway:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -89,6 +126,8 @@ services:
blade-auth1:
image: "${REGISTER}/blade/blade-auth:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -97,6 +136,8 @@ services:
blade-auth2:
image: "${REGISTER}/blade/blade-auth:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -105,6 +146,8 @@ services:
blade-log:
image: "${REGISTER}/blade/blade-log:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -112,6 +155,8 @@ services:
blade-desk:
image: "${REGISTER}/blade/blade-desk:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -119,6 +164,8 @@ services:
blade-user:
image: "${REGISTER}/blade/blade-user:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -126,6 +173,8 @@ services:
blade-system:
image: "${REGISTER}/blade/blade-system:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -133,6 +182,8 @@ services:
blade-flow:
image: "${REGISTER}/blade/blade-flow:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
@@ -140,6 +191,8 @@ services:
blade-flow-design:
image: "${REGISTER}/blade/blade-flow-design:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
ports:
@@ -147,6 +200,15 @@ services:
networks:
- blade_net
blade-resource:
image: "${REGISTER}/blade/blade-resource:${TAG}"
environment:
- TZ=Asia/Shanghai
privileged: true
restart: always
networks:
- blade_net
networks:
blade_net:
driver: bridge

0  script/docker/nacos/init.d/custom.properties → script/docker/app/nacos/init.d/custom.properties

4  script/docker/nginx/api/nginx.conf → script/docker/app/nginx/api/nginx.conf

@@ -36,8 +36,8 @@ http {
}
upstream auth {
server 172.30.0.91;
server 172.30.0.92;
server 172.30.0.91:8100;
server 172.30.0.92:8100;
}
server {

0  script/docker/nginx/web/html/index.html → script/docker/app/nginx/web/html/index.html

0  script/docker/nginx/web/nginx.conf → script/docker/app/nginx/web/nginx.conf

27  script/docker/elk/README.md

@@ -0,0 +1,27 @@
## 1. Adjust memory: "max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]" (the memory allowance for the elasticsearch user is too small; at least 262144 is required)
#### 1. Edit sysctl.conf
[root@localhost ~]# vi /etc/sysctl.conf
#### 2. Add the following setting:
vm.max_map_count=262144
#### 3. Reload the configuration:
[root@localhost ~]# sysctl -p
#### 4. Finally, restart elasticsearch and it will start successfully.
## 2. Docker command auto-completion
#### 1. Install the bash-completion dependency
[root@localhost ~]# yum install -y bash-completion
[root@localhost ~]# source /usr/share/bash-completion/completions/docker
[root@localhost ~]# source /usr/share/bash-completion/bash_completion
## 3. Copy the files in this folder to the server
#### 1. Grant execute permission to the sh scripts (see the example below)
#### 2. Run ./deploy.sh
#### 3. Wait for the services to finish starting
#### 4. To uninstall, run ./undeploy.sh
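A minimal sketch of section 3 as commands, assuming the folder was uploaded to /opt/elk (the path is an assumption, not in the original):

[root@localhost ~]# cd /opt/elk
[root@localhost elk]# chmod +x deploy.sh undeploy.sh   # step 1: make the scripts executable
[root@localhost elk]# ./deploy.sh                      # step 2: deploy, then wait for startup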

88  script/docker/elk/deploy.sh

@@ -0,0 +1,88 @@
#!/bin/bash
# Define colors
BLUE_COLOR="\033[36m"
RED_COLOR="\033[31m"
GREEN_COLOR="\033[32m"
VIOLET_COLOR="\033[35m"
RES="\033[0m"
echo -e "${BLUE_COLOR}# ######################################################################${RES}"
echo -e "${BLUE_COLOR}# Docker ELK Deploy Script #${RES}"
echo -e "${BLUE_COLOR}# ######################################################################${RES}"
# Create directories
echo -e "${BLUE_COLOR}---> create [elasticsearch]directory start.${RES}"
if [ ! -d "./elasticsearch/" ]; then
mkdir -p ./elasticsearch/master/conf ./elasticsearch/master/data ./elasticsearch/master/logs \
./elasticsearch/slave1/conf ./elasticsearch/slave1/data ./elasticsearch/slave1/logs \
./elasticsearch/slave2/conf ./elasticsearch/slave2/data ./elasticsearch/slave2/logs
fi
echo -e "${RED_COLOR}---> create [kibana]directory start.${RES}"
if [ ! -d "./kibana/" ]; then
mkdir -p ./kibana/conf ./kibana/logs
fi
echo -e "${GREEN_COLOR}---> create [logstash]directory start.${RES}"
if [ ! -d "./logstash/" ]; then
mkdir -p ./logstash/conf ./logstash/logs
fi
echo -e "${GREEN_COLOR}---> create [filebeat]directory start.${RES}"
if [ ! -d "./filebeat/" ]; then
mkdir -p ./filebeat/conf ./filebeat/logs ./filebeat/data
fi
echo -e "${VIOLET_COLOR}---> create [nginx]directory start.${RES}"
if [ ! -d "./nginx/" ]; then
mkdir -p ./nginx/conf ./nginx/logs ./nginx/www
fi
echo -e "${BLUE_COLOR}===> create directory success.${RES}"
# Grant directory permissions (data and logs both need read/write access)
echo -e "${BLUE_COLOR}---> directory authorize start.${RES}"
if [ -d "./elasticsearch/" ]; then
chmod 777 ./elasticsearch/master/data/ ./elasticsearch/master/logs/ \
./elasticsearch/slave1/data/ ./elasticsearch/slave1/logs/ \
./elasticsearch/slave2/data/ ./elasticsearch/slave2/logs
fi
if [ -d "./filebeat/" ]; then
chmod 777 ./filebeat/data/ ./filebeat/logs/
fi
echo -e "${BLUE_COLOR}===> directory authorize success.${RES}"
# Move config files
echo -e "${BLUE_COLOR}---> move [elasticsearch]config file start.${RES}"
if [ -f "./es-master.yml" ] && [ -f "./es-slave1.yml" ] && [ -f "./es-slave2.yml" ]; then
mv ./es-master.yml ./elasticsearch/master/conf
mv ./es-slave1.yml ./elasticsearch/slave1/conf
mv ./es-slave2.yml ./elasticsearch/slave2/conf
fi
echo -e "${RED_COLOR}---> move [kibana]config file start.${RES}"
if [ -f "./kibana.yml" ]; then
mv ./kibana.yml ./kibana/conf
fi
echo -e "${GREEN_COLOR}---> move [logstash]config file start.${RES}"
if [ -f "./logstash.yml" ] && [ -f "./logstash-filebeat.conf" ]; then
mv ./logstash-filebeat.conf ./logstash/conf
mv ./logstash.yml ./logstash/conf
fi
echo -e "${GREEN_COLOR}---> move [filebeat]config file start.${RES}"
if [ -f "./filebeat.yml" ]; then
mv ./filebeat.yml ./filebeat/conf
fi
echo -e "${VIOLET_COLOR}---> move [nginx]config file start.${RES}"
if [ -f "./nginx.conf" ]; then
mv ./nginx.conf ./nginx/conf
fi
echo -e "${BLUE_COLOR}===> move config files success.${RES}"
echo -e "${GREEN_COLOR}>>>>>>>>>>>>>>>>>> The End <<<<<<<<<<<<<<<<<<${RES}"
# Deploy the project
echo -e "${BLUE_COLOR}==================> Docker deploy Start <==================${RES}"
docker-compose up --build -d

115  script/docker/elk/docker-compose.yml

@@ -0,0 +1,115 @@
version: "3"
services:
es-master:
container_name: es-master
hostname: es-master
image: elasticsearch:7.1.1
restart: always
ports:
- 9200:9200
- 9300:9300
volumes:
- ./elasticsearch/master/conf/es-master.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- ./elasticsearch/master/data:/usr/share/elasticsearch/data
- ./elasticsearch/master/logs:/usr/share/elasticsearch/logs
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
es-slave1:
container_name: es-slave1
image: elasticsearch:7.1.1
restart: always
ports:
- 9201:9200
- 9301:9300
volumes:
- ./elasticsearch/slave1/conf/es-slave1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- ./elasticsearch/slave1/data:/usr/share/elasticsearch/data
- ./elasticsearch/slave1/logs:/usr/share/elasticsearch/logs
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
es-slave2:
container_name: es-slave2
image: elasticsearch:7.1.1
restart: always
ports:
- 9202:9200
- 9302:9300
volumes:
- ./elasticsearch/slave2/conf/es-slave2.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- ./elasticsearch/slave2/data:/usr/share/elasticsearch/data
- ./elasticsearch/slave2/logs:/usr/share/elasticsearch/logs
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
es-head:
container_name: es-head
image: mobz/elasticsearch-head:5
restart: always
ports:
- 9100:9100
depends_on:
- es-master
- es-slave1
- es-slave2
kibana:
container_name: kibana
hostname: kibana
image: kibana:7.1.1
restart: always
ports:
- 5601:5601
volumes:
- ./kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml
environment:
- ELASTICSEARCH_HOSTS=http://es-master:9200
depends_on:
- es-master
- es-slave1
- es-slave2
filebeat:
# Container name
container_name: filebeat
# Hostname
hostname: filebeat
# Image
image: docker.elastic.co/beats/filebeat:7.1.1
# Restart policy
restart: always
# Persistent mounts
volumes:
- ./filebeat/conf/filebeat.yml:/usr/share/filebeat/filebeat.yml
# Mapped into the container [as the data source]
- ./logs:/home/project/elk/logs
- ./filebeat/logs:/usr/share/filebeat/logs
- ./filebeat/data:/usr/share/filebeat/data
# Link the named container to this one; an alias can be set, avoiding broken connections caused by IPs changing when containers restart
links:
- logstash
ports:
- 9000:9000
# Dependent services [optional]
depends_on:
- es-master
- es-slave1
- es-slave2
logstash:
container_name: logstash
hostname: logstash
image: logstash:7.1.1
command: logstash -f ./conf/logstash-filebeat.conf
restart: always
volumes:
# Mapped into the container
- ./logstash/conf/logstash-filebeat.conf:/usr/share/logstash/conf/logstash-filebeat.conf
- ./logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml
ports:
- 5044:5044
depends_on:
- es-master
- es-slave1
- es-slave2
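Once deploy.sh has brought this stack up, the wiring can be sanity-checked through the published ports; a sketch, assuming the commands run on the Docker host itself:

docker-compose ps                                  # all seven containers should be Up
curl http://localhost:9200/_cluster/health?pretty  # es-master; expect three nodes in the cluster
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9100   # es-head
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:5601   # kibana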

28  script/docker/elk/es-master.yml

@@ -0,0 +1,28 @@
# Cluster name
cluster.name: es-cluster
# Node name
node.name: es-master
# Whether this node may become master
node.master: true
# Whether this node may store data (enabled by default)
node.data: false
# Network binding
network.host: 0.0.0.0
# HTTP port for external access
http.port: 9200
# TCP port for inter-node transport
transport.port: 9300
# Cluster discovery
discovery.seed_hosts:
- es-master
- es-slave1
- es-slave2
# Explicitly list the names or IPs of all master-eligible nodes; these are used in the first election
cluster.initial_master_nodes:
- es-master
# Allow cross-origin access
http.cors.enabled: true
http.cors.allow-origin: "*"
# Security authentication
xpack.security.enabled: false
#http.cors.allow-headers: "Authorization"
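With node.master: true and node.data: false, this node is a dedicated master while the slaves store the data; once the cluster is up, the role split can be confirmed via the _cat API (a sketch; port 9200 as published in docker-compose.yml):

curl "http://localhost:9200/_cat/nodes?v&h=name,node.role,master"
# es-master should show as the elected master; es-slave1/es-slave2 carry the data (d) role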

28  script/docker/elk/es-slave1.yml

@@ -0,0 +1,28 @@
# Cluster name
cluster.name: es-cluster
# Node name
node.name: es-slave1
# Whether this node may become master
node.master: true
# Whether this node may store data (enabled by default)
node.data: true
# Network binding
network.host: 0.0.0.0
# HTTP port for external access
http.port: 9201
# TCP port for inter-node transport
#transport.port: 9301
# Cluster discovery
discovery.seed_hosts:
- es-master
- es-slave1
- es-slave2
# Explicitly list the names or IPs of all master-eligible nodes; these are used in the first election
cluster.initial_master_nodes:
- es-master
# Allow cross-origin access
http.cors.enabled: true
http.cors.allow-origin: "*"
# Security authentication
xpack.security.enabled: false
#http.cors.allow-headers: "Authorization"

28  script/docker/elk/es-slave2.yml

@@ -0,0 +1,28 @@
# Cluster name
cluster.name: es-cluster
# Node name
node.name: es-slave2
# Whether this node may become master
node.master: true
# Whether this node may store data (enabled by default)
node.data: true
# Network binding
network.host: 0.0.0.0
# HTTP port for external access
http.port: 9202
# TCP port for inter-node transport
#transport.port: 9302
# Cluster discovery
discovery.seed_hosts:
- es-master
- es-slave1
- es-slave2
# Explicitly list the names or IPs of all master-eligible nodes; these are used in the first election
cluster.initial_master_nodes:
- es-master
# Allow cross-origin access
http.cors.enabled: true
http.cors.allow-origin: "*"
# Security authentication
xpack.security.enabled: false
#http.cors.allow-headers: "Authorization"

37  script/docker/elk/filebeat.yml

@@ -0,0 +1,37 @@
filebeat.inputs:
- type: log
enabled: true
paths:
# All .log files under this directory
- /home/project/elk/logs/*.log
multiline.pattern: ^\[
multiline.negate: true
multiline.match: after
- type: tcp
enabled: true
max_message_size: 10MiB
host: "0.0.0.0:9000"
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
setup.template.settings:
index.number_of_shards: 1
setup.dashboards.enabled: false
setup.kibana:
host: "http://kibana:5601"
# Do not ship directly to ES
#output.elasticsearch:
# hosts: ["http://es-master:9200"]
# index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}"
output.logstash:
hosts: ["logstash:5044"]
processors:
- add_host_metadata: ~
- add_cloud_metadata: ~
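Alongside the log tail, the TCP input listens on 0.0.0.0:9000 (published in docker-compose.yml), so a test event can be pushed straight into the beat; a sketch, assuming nc is available on the host:

echo 'test event for the elk pipeline' | nc localhost 9000
# the event should flow through logstash and land in the filebeat-* index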

8  script/docker/elk/kibana.yml

@@ -0,0 +1,8 @@
# Server port
server.port: 5601
# Server IP
server.host: "0.0.0.0"
# ES
elasticsearch.hosts: ["http://es-master:9200"]
# Chinese localization
i18n.locale: "zh-CN"

23  script/docker/elk/logstash-filebeat.conf

@@ -0,0 +1,23 @@
input {
# Input from beats
beats {
# Port
port => "5044"
}
}
# Parsing/filtering plugins; multiple are allowed
filter {
grok {
match => { "message" => "%{COMBINEDAPACHELOG}"}
}
geoip {
source => "clientip"
}
}
output {
# Output to elasticsearch
elasticsearch {
hosts => ["http://es-master:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
}
}
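The grok filter assumes Apache combined-format log lines; a sample message it can parse (hypothetical data), with the extracted clientip field feeding the geoip lookup:

127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://example.com/start" "Mozilla/4.08 [en] (Win98; I ;Nav)"

Note that geoip resolves only public addresses; a loopback clientip like the one above is tagged _geoip_lookup_failure instead.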

8  script/docker/elk/logstash.yml

@@ -0,0 +1,8 @@
# Server IP
http.host: "0.0.0.0"
# ES
xpack.monitoring.elasticsearch.hosts: [ "http://es-master:9200" ]
xpack.monitoring.enabled: true
xpack.management.enabled: false

16  script/docker/elk/undeploy.sh

@@ -0,0 +1,16 @@
#!/bin/bash
# Define colors
BLUE_COLOR="\033[36m"
RED_COLOR="\033[31m"
GREEN_COLOR="\033[32m"
VIOLET_COLOR="\033[35m"
RES="\033[0m"
echo -e "${BLUE_COLOR}# ######################################################################${RES}"
echo -e "${BLUE_COLOR}# Docker ELK UnDeploy Script #${RES}"
echo -e "${BLUE_COLOR}# ######################################################################${RES}"
# Undeploy the project
echo -e "${BLUE_COLOR}==================> Docker UnDeploy Start <==================${RES}"
docker-compose stop
docker-compose rm
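As written, docker-compose rm prompts for confirmation before removing the stopped containers; a non-interactive variant (an assumption, not part of this commit) would be:

docker-compose rm -f   # remove stopped containers without prompting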

0  script/service.cmd → script/fatjar/service.cmd

0  script/service.sh → script/fatjar/service.sh
