Log Analysis Platform: The ELK 7.0 Era Has Arrived


Cluster architecture diagram (original image not reproduced)

1. Elasticsearch Cluster Deployment

1.1 Initialize the Java runtime

# Install the JDK
root@es-logs-20-114:/usr/local# ls -l | grep jdk
lrwxrwxrwx  1 root root   22 Apr  1  2017 jdk -> /usr/local/jdk1.8.0_65
drwxr-xr-x  8 root root 4096 Nov 25  2015 jdk1.8.0_65

# Configure Java environment variables
export JAVA_HOME=/usr/local/jdk
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

# Kernel tuning (elasticsearch requires vm.max_map_count >= 262144)
root@es-logs-20-114:/usr/local# cat /etc/sysctl.conf |grep vm.max
vm.max_map_count = 262144

# Raise open-file and memlock limits
root@es-logs-20-114:/usr/local# cat /etc/security/limits.conf | egrep -v '^#'
root soft nofile 65536
root hard nofile 65536
* soft nofile 65536
* hard nofile 65536
suoper soft memlock unlimited
suoper hard memlock unlimited
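
To apply the kernel setting without a reboot and to sanity-check the limits, something like the following should work (a minimal sketch; limits.conf changes only take effect on a fresh login session):

# Reload kernel parameters from /etc/sysctl.conf
sysctl -p

# Check the limits from a new session as the run user
su - suoper -c 'ulimit -n'   # expect 65536
su - suoper -c 'ulimit -l'   # expect unlimited (memlock)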

1.2 Prepare the elasticsearch configuration files

# Create the data directory and hand it to the run user
mkdir -p /data/es
chown -R suoper.suoper /data/es

root@es-logs-20-114:/apps# ls -l |grep 7.0
drwxr-xr-x 10 suoper suoper      4096 Jun 21 11:51 elasticsearch-7.0.0

# node1 config file: /apps/elasticsearch-7.0.0/config/elasticsearch.yml
cluster.name: log-cluster
node.name: es-logs-node-114
path.data: /data/es/data
path.logs: /data/es/logs
bootstrap.memory_lock: true
network.host: 192.168.20.114
http.port: 9200
discovery.zen.minimum_master_nodes: 2
discovery.zen.fd.ping_timeout: 1000s
discovery.zen.fd.ping_retries: 10
discovery.seed_hosts: ["192.168.20.112", "192.168.20.113","192.168.20.114"]
cluster.initial_master_nodes: ["192.168.20.112", "192.168.20.113","192.168.20.114"]
http.cors.enabled: true
http.cors.allow-origin: "*"

# node2 config file: /apps/elasticsearch-7.0.0/config/elasticsearch.yml
cluster.name: log-cluster
node.name: es-logs-node-113
path.data: /data/es/data
path.logs: /data/es/logs
bootstrap.memory_lock: true
network.host: 192.168.20.113
http.port: 9200
discovery.zen.minimum_master_nodes: 2
discovery.zen.fd.ping_timeout: 1000s
discovery.zen.fd.ping_retries: 10
discovery.seed_hosts: ["192.168.20.112", "192.168.20.113","192.168.20.114"]
cluster.initial_master_nodes: ["192.168.20.112", "192.168.20.113","192.168.20.114"]
http.cors.enabled: true
http.cors.allow-origin: "*"

# node3 config file: /apps/elasticsearch-7.0.0/config/elasticsearch.yml
cluster.name: log-cluster
node.name: es-logs-node-112
path.data: /data/es/data
path.logs: /data/es/logs
bootstrap.memory_lock: true
network.host: 192.168.20.112
http.port: 9200
discovery.zen.minimum_master_nodes: 2
discovery.zen.fd.ping_timeout: 1000s
discovery.zen.fd.ping_retries: 10
discovery.seed_hosts: ["192.168.20.112", "192.168.20.113","192.168.20.114"]
cluster.initial_master_nodes: ["192.168.20.112", "192.168.20.113","192.168.20.114"]
http.cors.enabled: true
http.cors.allow-origin: "*"

# Start elasticsearch as the suoper user (elasticsearch refuses to run as root)
su suoper
/apps/elasticsearch-7.0.0/bin/elasticsearch -d

# Verify the nodes joined and the cluster can create indices
root@es-logs-20-114:~# curl http://192.168.20.114:9200/_cat/nodes
192.168.20.114  8 96 0 0.01 0.01 0.00 mdi * es-logs-node-114
192.168.20.113 63 94 3 0.05 0.13 0.20 mdi - es-logs-node-113
192.168.20.112 62 93 3 0.14 0.18 0.23 mdi - es-logs-node-112
root@es-logs-20-114:~# curl http://192.168.20.114:9200/_cat/indices?v
health status index                                uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   twitter                              hVI1v3MXRUuj1qMMBi7A_A   3   2          0            0      2.4kb           849b
green  open   .kibana_task_manager                 phs7DstZRHyFv_hMM9gGPQ   1   1          2            0       75kb         45.5kb
green  open   logstash-nginx-access-log-2019.06.21 E07-ekY3Ri2ILbXXfy4WBg   1   1    8178718            0      7.8gb          3.9gb
green  open   .kibana_1                            8zivlf8nT3KPkwe3EYdqSg   1   1         18           10    105.4kb         53.2kb
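
For reference, the twitter smoke-test index above (3 primaries, 2 replicas) could have been created with a request along these lines (a sketch; the settings are inferred from the pri/rep columns in the output, the original creation step is not shown):

curl -X PUT "http://192.168.20.114:9200/twitter?pretty" \
  -H 'Content-Type: application/json' \
  -d '{"settings": {"number_of_shards": 3, "number_of_replicas": 2}}'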

Note:

  • The discovery.zen.ping.unicast.hosts setting from earlier elasticsearch versions was renamed to discovery.seed_hosts in elasticsearch 7.0.
  • discovery.zen.minimum_master_nodes (still shown above) is deprecated and ignored in 7.0; initial master election is handled by cluster.initial_master_nodes instead.
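
Once discovery is configured, the cluster health API gives a quick check that all three nodes formed a single cluster (a minimal sketch; any node can answer):

curl -s http://192.168.20.114:9200/_cluster/health?pretty
# expect "status" : "green" and "number_of_nodes" : 3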

2. Logstash

Both logstash hosts use the same configuration.

Run logstash with docker-compose:

version: "3"
services:
  logstash:
    image: logstash:7.0.0
    labels:
      common: input nginx_access_log kafka topic and output to es
    logging:
      driver: "journald"
    network_mode: "bridge"
    ports:
      - "9600:9600"
    volumes:
      - "/dockerProjects/logstash_access/config:/usr/share/logstash/config"
      - "/dockerProjects/logstash_access/pipeline_7.0:/usr/share/logstash/pipeline"
    restart: always
    hostname: docker_logstash_147
    container_name: logstash_access_147

The /dockerProjects/logstash_access/config directory was copied out of the logstash:7.0.0 image; the only file modified is config/logstash.yml:

http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.20.112:9200", "http://192.168.20.113:9200", "http://192.168.20.114:9200" ]

The pipeline_7.0 directory contains a single file, logstash_access.conf:

input {
    kafka {
        bootstrap_servers => "192.168.20.105:9092,192.168.20.106:9092,192.168.20.107:9092"
        group_id => "logstash_new"
        topics => ["nginx_access_log"]
        codec => "json"
        consumer_threads => 4
    }
}

filter {
    mutate {
        # escape raw \x sequences in nginx logs so the json filter can parse the message
        gsub => ["message", "\\x", "\\\\x"]
    }
    json {
        source => "message"
        remove_field => ["message"]
    }
}

output {
    stdout { codec => json }
    elasticsearch {
        hosts => ["192.168.20.112:9200","192.168.20.113:9200","192.168.20.114:9200"]
        index => "logstash-nginx-access-log-%{+YYYY.MM.dd}"
    }
}
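
Before bringing the container up, the pipeline syntax can be validated with logstash's --config.test_and_exit flag (a sketch; it mounts the same host directories as the compose file above):

docker run --rm \
  -v /dockerProjects/logstash_access/config:/usr/share/logstash/config \
  -v /dockerProjects/logstash_access/pipeline_7.0:/usr/share/logstash/pipeline \
  logstash:7.0.0 \
  logstash --config.test_and_exit -f /usr/share/logstash/pipeline/logstash_access.conf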

Start logstash:

cd /dockerProjects/logstash_access
docker-compose up -d
root@logstash-20-147:/dockerProjects/logstash_access# docker-compose ps
       Name                      Command               State                Ports
-----------------------------------------------------------------------------------------------
logstash_access_147   /usr/local/bin/docker-entr ...   Up      5044/tcp, 0.0.0.0:9600->9600/tcp
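
With the container up, the monitoring API published on port 9600 confirms the pipeline loaded and is processing events (a minimal sketch):

curl -s http://localhost:9600/_node/stats/pipelines?pretty
# look for rising "in"/"out" event counters once kafka traffic flows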

3. filebeat

filebeat configuration file (filebeat.yml):

filebeat.inputs:
- type: log   # filebeat 7 uses "type"; 5.x used "input_type"
  paths:
    - /logs/nginx/access.log
    - /logs/nginx/tech.log
  # document_type was removed after filebeat 5.x; carry it as a custom field instead
  fields:
    document_type: nginx-log
output.kafka:
  hosts: ["192.168.20.105:9092","192.168.20.106:9092","192.168.20.107:9092"]
  topic: nginx_access_log
  version: "0.8.2.0"
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 10000

Note

  • The output.kafka.version setting: if filebeat fails to publish messages to kafka, adjust this value to match the kafka plugin version for your cluster; the kafka cluster in this experiment runs kafka_2.11-0.10.1.1 (the self-test sketch below helps diagnose publish failures). Reference: https://blog.csdn.net/pml18710973036/article/details/86600647
  • filebeat 5.2.2 used filebeat.prospectors; filebeat 7.0 uses filebeat.inputs, and the per-input key changed from input_type to type.
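
When the kafka output misbehaves, filebeat's built-in self-tests are a quick first check (a minimal sketch using the install paths from the systemd unit below; output testing support varies by output type):

# Validate the configuration file syntax
/apps/filebeat/filebeat test config -c /apps/filebeat/filebeat.yml

# Try to connect to the configured output (the kafka brokers here)
/apps/filebeat/filebeat test output -c /apps/filebeat/filebeat.yml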

Configure a systemd unit to manage filebeat:

[Unit]
Description=filebeat
Documentation=https://www.elastic.co/guide/en/beats/filebeat/current/index.html
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/apps/filebeat/filebeat -c /apps/filebeat/filebeat.yml -path.home /apps/filebeat -path.config /apps/filebeat/ -path.data /var/lib/filebeat -path.logs /var/log/filebeat
Restart=always

[Install]
WantedBy=multi-user.target
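
Assuming the unit above is saved as /etc/systemd/system/filebeat.service (the path is an assumption, it is not stated here), register it and enable it at boot first:

systemctl daemon-reload
systemctl enable filebeat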

Start the filebeat service:

systemctl start filebeat
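
To confirm it is running and follow its output:

systemctl status filebeat
journalctl -u filebeat -f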

4. kibana

Configuration file (kibana.yml):

server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://192.168.20.114:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true

docker-compose file

version: "3"
services:
  kibana:
    image: kibana:7.0.0
    labels:
      common: elk kibana
    logging:
      driver: "journald"
    network_mode: "bridge"
    volumes:
      - "/dockerProjects/kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml"
    environment:
      TZ: "Asia/Shanghai"
    ports:
      - "5601:5601"
    restart: always
    hostname: kibana-20-150
    container_name: kibana
    ulimits:
      nproc: 65535
      nofile:
        soft: 20000
        hard: 40000
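
After docker-compose up -d, kibana's status API verifies that it reached the cluster (a sketch; the 192.168.20.150 address is inferred from the container hostname above and may differ in your setup):

curl -s http://192.168.20.150:5601/api/status
# an overall state of "green" means kibana is connected to elasticsearch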
