0. java
# 确认java版本1.8以上
java -version
# 安装
yum -y install java-1.8.0-openjdk
1. elasticsearch
# 下载包
wget https://repo.huaweicloud.com/elasticsearch/7.7.1/elasticsearch-7.7.1-linux-x86_64.tar.gz
# 解压到 /data/soft 目录
tar -xzvf elasticsearch-7.7.1-linux-x86_64.tar.gz -C /data/soft/
# 创建专用用户和属组
groupadd es
useradd es -g es
# 用户权限
cd /data
mkdir -p soft/es/es_data
mkdir -p soft/es/es_logs
chown -R es:es soft/
# 修改es配置
vim /data/soft/elasticsearch-7.7.1/config/elasticsearch.yml
---
# Cluster and node identity
cluster.name: elk_es
node.name: elk_es_node_1
# Dedicated data/log directories created earlier (must be owned by the "es" user)
path.data: /data/soft/es/es_data
path.logs: /data/soft/es/es_logs
# Bind addresses: loopback plus the server's public IP (replace the placeholder)
network.host: ["127.0.0.1", "公网ip"]
network.publish_host: 127.0.0.1
http.port: 9200
cluster.initial_master_nodes: ["elk_es_node_1"] # must match node.name
---
# 启动es. 可加守护进程参数 -d
cd /data/soft/elasticsearch-7.7.1/
./bin/elasticsearch
解决报错
① max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535]
解释:elasticsearch用户拥有的可创建文件描述的权限太低,至少需要65536;
# 查看当前用户的软限制
ulimit -Sn
# 查看当前用户的硬限制
ulimit -Hn
# vim /etc/security/limits.conf 在最后面追加下面内容
es hard nofile 65536
es soft nofile 65536
② max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
解释:vm.max_map_count的值太小了。需要修改到262144
# 修改sysctl.conf
vim /etc/sysctl.conf
# 添加内容
vm.max_map_count = 262144
# 修改生效
/sbin/sysctl -p
2. kibana
# 下载包
wget https://repo.huaweicloud.com/kibana/7.7.1/kibana-7.7.1-linux-x86_64.tar.gz
# 解压
tar -xzvf kibana-7.7.1-linux-x86_64.tar.gz -C /data/soft/
# 修改配置
vim /data/soft/kibana-7.7.1-linux-x86_64/config/kibana.yml
---
# Address Kibana listens on (loopback only; exposed via the nginx reverse proxy)
server.port: 5601
server.host: "127.0.0.1"
server.name: "kibana_elk"
# Elasticsearch endpoint Kibana queries
elasticsearch.hosts: ["http://127.0.0.1:9200"]
---
# 后台启动
nohup /data/soft/kibana-7.7.1-linux-x86_64/bin/kibana >> /data/soft/kibana-7.7.1-linux-x86_64/nohup.out 2>&1 &
# 此时应该能通过 IP:5601 访问到kibana界面,注意防火墙端口配置
2.1 配置 kibana 域名访问
# vim /etc/nginx/conf.d/elk.conf
---
# Reverse proxy: terminates HTTP on port 80 and forwards to the local Kibana.
server {
    listen 80;
    server_name elk.domain.com;

    access_log /data/logs/kibana_access.log main;
    error_log /data/logs/kibana_error.log error;

    location / {
        # HTTP basic auth in front of Kibana (htpasswd file must exist)
        auth_basic "elk auth";
        auth_basic_user_file /etc/nginx/htpasswd;
        proxy_pass http://127.0.0.1:5601;
    }
}
---
nginx -t
nginx -s reload
2.2 访问 kibana 配置索引模板 Index Templates
Index pattern: project_logs-*
3. logstash
# 下载包
wget https://repo.huaweicloud.com/logstash/7.7.1/logstash-7.7.1.tar.gz
# 解压
tar -xzvf logstash-7.7.1.tar.gz -C /data/soft
# 编写配置
vim /data/soft/logstash-7.7.1/config/elk_log.conf
---
# Pull log events that Filebeat pushed into a Redis list.
input {
  redis {
    data_type => "list"
    host => "redis_server_ip"
    db => "0"
    port => "6379"
    password => "redis_pwd"
    key => "redis_list_key"
  }
}
filter {
  ruby {
    # Build [@metadata][f_abs_name] (= source dir without the /data prefix,
    # plus host name and file name) and [@metadata][log_type] from the
    # Filebeat-supplied [log][file][path] field.
    code => "
      log_info=event.get('log')
      if log_info and log_info['file']
        file_path = log_info['file']['path']
        dir_path=File::dirname(file_path)
        dir_path=dir_path.delete_prefix('/data')
        f_abs_name=dir_path.concat('/')
        f_abs_name=f_abs_name.concat(event.get('host')['name'])
        f_abs_name=f_abs_name.concat('.')
        file_name=File::basename(file_path)
        # Guard against pathological file names blowing up the field
        if file_name.length > 200
          file_name='file_name_too_long.log'
        end
        f_abs_name=f_abs_name.concat(file_name)
        if event.get('@metadata') != nil
          metadata_hash=event.get('@metadata')
        else
          metadata_hash=Hash.new
        end
        metadata_hash['f_name'] = File::basename(file_path)
        metadata_hash['f_abs_name'] = f_abs_name
        # /data/logs/<log_type>/... => element 3 after splitting on '/'
        dir_arr=file_path.split('/')
        if dir_arr.at(2) == 'logs'
          metadata_hash['log_type'] = dir_arr.at(3)
        end
        event.set('@metadata', metadata_hash)
      end
    "
  }
}
output {
  # Ship project logs to Elasticsearch, one index per day.
  if [@metadata][log_type] == "project_logs" {
    elasticsearch {
      hosts => ["http://127.0.0.1:9200"]
      # FIX: sprintf date format needs a leading '+' and no space;
      # '%{ YYYY.MM.dd}' would be emitted literally into the index name.
      index => "project_logs-%{+YYYY.MM.dd}"
    }
  }
}
---
# 后台启动
nohup /data/soft/logstash-7.7.1/bin/logstash -f /data/soft/logstash-7.7.1/config/elk_log.conf >> /data/soft/logstash-7.7.1/nohup.out 2>&1 &
# 此时 logstash 会去redis里面取待同步的数据
4. filebeat
在待同步日志的服务器上安装
# 下载包
wget https://repo.huaweicloud.com/filebeat/7.7.1/filebeat-7.7.1-linux-x86_64.tar.gz
# 解压
tar -xzvf filebeat-7.7.1-linux-x86_64.tar.gz -C /data/soft/
# 编辑配置
vim /data/soft/filebeat-7.7.1-linux-x86_64/elk_log.yml
---
# FIX: the pasted config lost all YAML indentation, which makes it invalid;
# nesting restored below. Values are unchanged.
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /data/logs/project_logs*/*.log
    encoding: utf-8
    # Parse each line as JSON and lift keys to the event root,
    # overwriting Filebeat's own fields on conflict
    json.keys_under_root: true
    json.overwrite_keys: true

# Push events into a Redis list for Logstash to consume
output.redis:
  enabled: true
  hosts: ["redis_server_ip:6379"]
  key: redis_list_key
  password: redis_pwd
  db: 0
  worker: 2
  loadbalance: false
  timeout: 10s
  max_retries: 3
  bulk_max_size: 2048
---
# 启动
/data/soft/filebeat-7.7.1-linux-x86_64/filebeat -c elk_log.yml
# 此时filebeat会监听文件变更,并将信息输出到redis,以通过logstash同步到elasticsearch。