本篇文章是基于"ELK 部署可视化网络日志分析监控平台"进行升级, 实现网络异常日志联动ZABBIX告警,网络日志分析监控平台部署请参考前期文章。
简介
由于ELK 开源版本不提供告警模块,网络异常日志只能通过ELK 过滤查看,无法实现告警实时推送,存在告警遗漏等问题!本着"能用机器做的事情就不要人去介入"的懒惰原则,实现网络设备异常日志的自动化监控告警。
实现思路
使用logstash-output-zabbix插件,将logstash收集到的数据过滤出异常日志输出到ZABBIX实现告警推送。
安装logstash-output-zabbix插件
代码语言:javascript复制/usr/share/logstash/bin/logstash-plugin install logstash-output-zabbix
filebeat 配置
代码语言:javascript复制# egrep -v "*#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
# One input per vendor log directory; the tag selects the matching grok
# pattern in the logstash filter stage.
- type: log
  enabled: true
  paths:
    - /mnt/huawei/*
  tags: ["huawei"]
  # Ship only fault / link-state lines. \b are regex word boundaries —
  # the backslashes were lost in the original paste ("bDOWNb").
  include_lines: ['Failed','failed','error','ERROR','\bDOWN\b','\bdown\b','\bUP\b','\bup\b']
  # drop_fields is a processor: it must be nested under "processors:",
  # otherwise filebeat rejects the configuration.
  processors:
    - drop_fields:
        fields: ["beat","input_type","source","offset","prospector"]
- type: log
  enabled: true
  paths:
    - /mnt/h3c/*
  tags: ["h3c"]
  include_lines: ['Failed','failed','error','ERROR','\bDOWN\b','\bdown\b','\bUP\b','\bup\b']
  processors:
    - drop_fields:
        fields: ["beat","input_type","source","offset","prospector"]
- type: log
  enabled: true
  paths:
    - /mnt/ruijie/*
  tags: ["ruijie"]
  include_lines: ['Failed','failed','error','ERROR','\bDOWN\b','\bdown\b','\bUP\b','\bup\b']
  processors:
    - drop_fields:
        fields: ["beat","input_type","source","offset","prospector"]

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

# Shipper name; here set to the host IP so it lines up with the
# zabbix_host value added in logstash (192.168.99.185).
name: 192.168.99.185

setup.template.settings:
  index.number_of_shards: 3

setup.kibana:

output.logstash:
  hosts: ["192.168.99.185:5044"]

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
logstash 配置
代码语言:javascript复制[root@elk-node1 conf.d]# cat networklog.conf
input {
  beats {
    port => 5044
  }
}

filter {
  # Vendor-specific parsing: Huawei and Ruijie share a syslog layout;
  # H3C inserts the year between the timestamp and the hostname.
  if "huawei" in [tags] {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:time} %{DATA:hostname} %{GREEDYDATA:info}" }
    }
  }
  else if "h3c" in [tags] {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:time} %{YEAR:year} %{DATA:hostname} %{GREEDYDATA:info}" }
    }
  }
  else if "ruijie" in [tags] {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:time} %{DATA:hostname} %{GREEDYDATA:info}" }
    }
  }
  mutate {
    # zabbix_key / zabbix_host are read by the zabbix output below.
    # zabbix_host must match the host name configured in the Zabbix frontend.
    add_field => [ "[zabbix_key]", "networklogs" ]
    add_field => [ "[zabbix_host]", "192.168.99.185" ]
    # count = hostname + message body, so a Zabbix alert identifies the
    # originating network device.
    add_field => [ "count", "%{hostname}%{info}" ]
    remove_field => ["message","time","year","offset","tags","path","host","@version","[log]","[prospector]","[beat]","[input][type]","[source]"]
  }
}

output {
  stdout { codec => rubydebug }
  elasticsearch {
    # FIX: the date sprintf needs a leading "+" — %{+YYYY.MM.dd}.
    # The original "%{ YYYY.MM.dd}" is not a valid sprintf reference, so
    # the index name would not rotate daily as intended.
    index => "networklogs-%{+YYYY.MM.dd}"
    hosts => ["192.168.99.185:9200"]
    sniffing => false
  }
  # Forward only error-looking events to Zabbix.
  if [count] =~ /(error|ERROR|Failed|failed)/ {
    zabbix {
      zabbix_host => "[zabbix_host]"          # field holding the Zabbix host name
      zabbix_key => "[zabbix_key]"            # field holding the item key
      zabbix_server_host => "192.168.99.200"  # Zabbix server address
      zabbix_server_port => "10051"           # Zabbix trapper port
      zabbix_value => "count"                 # field holding the item value
    }
  }
}
logstash参数介绍
代码语言:javascript复制 add_field => [ "[zabbix_key]", "networklogs" ]
#新增的字段,字段名是zabbix_key,值为networklogs。
add_field => [ "[zabbix_host]", "192.168.99.185" ]
#新增的字段,字段名是zabbix_host,该字段必须和zabbix 中调用模板的主机的名称对应
add_field => [ "count","%{hostname}%{info}" ]
#新增的字段,字段名是count,该字段是合并网络日志切割后的主机名和告警信息,用于在zabbix上区分网络主机的日志
if [count] =~ /(error|ERROR|Failed|failed)/ {
#定义在count字段中过滤的网络异常关键字日志推送zabbix。
zabbix {
zabbix_host => "[zabbix_host]"
#zabbix_host 调用filter中定义的变量
zabbix_key => "[zabbix_key]"
#zabbix_key 调用filter中定义的变量
zabbix_server_host => "192.168.99.200"
#指定zabbix server的IP地址
zabbix_server_port => "10051"
#指定zabbix server 监听的端口
zabbix_value => "count"
#指定传送zabbix监控项item(networklogs)的值
}
}
测试字段切割合并是否正常
ZABBIX Web 配置
创建模板
创建应用集
创建监控项
创建触发器
代码语言:javascript复制{logstash-output-zabbix:networklogs.strlen()}>0 and {logstash-output-zabbix:networklogs.nodata(60)}=0
主机关联模板
zabbix-sender 安装
代码语言:javascript复制yum install zabbix-sender
#使用zabbix-sender向zabbix 发送数据测试
[root@elk-node1 conf.d]# zabbix_sender -s 192.168.99.185 -z 192.168.99.200 -k "networklogs" -o 1 -vv
zabbix_sender [2444]: DEBUG: answer [{"response":"success","info":"processed: 1; failed: 0; total: 1; seconds spent: 0.000057"}]
Response from "192.168.99.200:10051": "processed: 1; failed: 0; total: 1; seconds spent: 0.000057"
sent: 1; skipped: 0; total: 1
参数介绍:
-s:指定主机名,必须与ZABBIX 前端中配置的主机名称一致(本例中主机直接以IP 命名)
-z:指定zabbix server
-k:指定键值
ZABBIX 查看最新文本数据
Kibana 查看网络日志
ZABBIX 告警