1. 首页 > Linux教程 > 正文

Linux教程FG386-日志分析平台实战

内容简介:本文风哥教程参考Linux官方文档、Red Hat Enterprise Linux官方文档、Ansible Automation Platform官方文档、Docker官方文档、Kubernetes官方文档和Podman官方文档等内容,详细介绍了相关技术的配置和使用方法。

风哥提示:

本文档介绍日志分析平台的部署和应用。

Part01-ELK高级配置

1.1 Elasticsearch集群

# 配置Elasticsearch集群
[root@es-node1 ~]# cat > /etc/elasticsearch/elasticsearch.yml << 'EOF'
cluster.name: log-cluster
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
http.port: 9200
transport.port: 9300
discovery.seed_hosts: ["192.168.1.10", "192.168.1.11", "192.168.1.12"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]
node.roles: [master, data, ingest]
xpack.security.enabled: false
EOF

# 配置JVM
[root@es-node1 ~]# cat > /etc/elasticsearch/jvm.options.d/heap.options << 'EOF'
-Xms8g
-Xmx8g
EOF

# 启动Elasticsearch
[root@es-node1 ~]# systemctl enable --now elasticsearch

# 查看集群状态
[root@es-node1 ~]# curl http://localhost:9200/_cluster/health?pretty
{
  "cluster_name" : "log-cluster",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 3,
  "number_of_data_nodes" : 3,
  "active_primary_shards" : 10,
  "active_shards" : 20,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}

# 查看节点信息
[root@es-node1 ~]# curl http://localhost:9200/_cat/nodes?v
ip           heap.percent ram.percent cpu load_1m load_5m load_15m node.role   master name
192.168.1.10 30           95          5   0.50    0.30    0.20     cdfhilmrstw *      node-1
192.168.1.11 25           90          3   0.40    0.25    0.15     cdfhilmrstw -      node-2
192.168.1.12 28           92          4   0.45    0.28    0.18     cdfhilmrstw -      node-3

1.2 Logstash Pipeline

# 配置Logstash Pipeline
[root@logstash ~]# cat > /etc/logstash/pipelines.yml << 'EOF'
- pipeline.id: nginx
  path.config: "/etc/logstash/conf.d/nginx.conf"
- pipeline.id: syslog
  path.config: "/etc/logstash/conf.d/syslog.conf"
- pipeline.id: mysql
  path.config: "/etc/logstash/conf.d/mysql.conf"
EOF

# Nginx日志解析配置
[root@logstash ~]# cat > /etc/logstash/conf.d/nginx.conf << 'EOF'
input {
beats {
port => 5044
}
}

filter {
if [fields][type] == "nginx-access" {
grok {
match => { "message" => '%{IPORHOST:client_ip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] "%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status} %{NUMBER:body_bytes_sent} "%{GREEDYDATA:http_referer}" "%{GREEDYDATA:http_user_agent}"' }
}
date {
match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
}
geoip {
source => "client_ip"
target => "geoip"
}
useragent {
source => "http_user_agent"
target => "user_agent"
}
mutate {
convert => {
"status" => "integer"
"body_bytes_sent" => "integer"
}
}
}
}

output {
if [fields][type] == "nginx-access" {
elasticsearch {
hosts => ["http://192.168.1.10:9200", "http://192.168.1.11:9200", "http://192.168.1.12:9200"]
index => "nginx-access-%{+YYYY.MM.dd}"
}
}
}
EOF

# MySQL慢查询日志解析
[root@logstash ~]# cat > /etc/logstash/conf.d/mysql.conf << 'EOF'
input {
file {
path => "/var/log/mysql/slow.log"
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_mysql"
codec => multiline {
pattern => "^# Time:"
negate => true
what => "previous"
}
}
}

filter {
grok {
match => { "message" => "# Time: %{TIMESTAMP_ISO8601:timestamp}\n# User@Host: %{USER:user}\[%{USER:user}\] @ %{HOSTNAME:host} \[%{IP:ip}\]\n# Query_time: %{NUMBER:query_time:float} Lock_time: %{NUMBER:lock_time:float} Rows_sent: %{NUMBER:rows_sent:integer} Rows_examined: %{NUMBER:rows_examined:integer}\n%{GREEDYDATA:query}" }
}
date {
match => [ "timestamp", "ISO8601" ]
}
}

output {
elasticsearch {
hosts => ["http://192.168.1.10:9200"]
index => "mysql-slow-%{+YYYY.MM.dd}"
}
}
EOF

# 测试配置
[root@logstash ~]# /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/nginx.conf
Using bundled JDK: /usr/share/logstash/jdk
Sending Logstash logs to /var/log/logstash which is now configured via log4j2.properties
Configuration OK

# 启动Logstash
[root@logstash ~]# systemctl enable --now logstash

Part02-Kibana可视化

2.1 创建仪表盘

# 创建索引模式
[root@kibana ~]# curl -X POST "http://localhost:5601/api/saved_objects/index-pattern" \
-H "kbn-xsrf: true" \
-H "Content-Type: application/json" \
-d '{
"attributes": {
"title": "nginx-access-*",
"timeFieldName": "@timestamp"
}
}'

# 创建可视化(通过API)
[root@kibana ~]# cat > /tmp/create_visualization.sh << 'EOF'
#!/bin/bash

# HTTP状态码分布
curl -X POST "http://localhost:5601/api/saved_objects/visualization" \
  -H "kbn-xsrf: true" \
  -H "Content-Type: application/json" \
  -d '{
    "attributes": {
      "title": "HTTP Status Distribution",
      "visState": "{\"title\":\"HTTP Status Distribution\",\"type\":\"pie\",\"params\":{\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"status.keyword\",\"size\":10}}]}",
      "searchSourceJSON": "{\"index\":\"nginx-access-*\",\"query\":{\"match_all\":{}}}"
    }
  }'

# 请求量趋势
curl -X POST "http://localhost:5601/api/saved_objects/visualization" \
  -H "kbn-xsrf: true" \
  -H "Content-Type: application/json" \
  -d '{
    "attributes": {
      "title": "Request Trend",
      "visState": "{\"title\":\"Request Trend\",\"type\":\"line\",\"params\":{\"type\":\"line\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\"}}]}",
      "searchSourceJSON": "{\"index\":\"nginx-access-*\",\"query\":{\"match_all\":{}}}"
    }
  }'
EOF

# Kibana查询示例
[root@kibana ~]# cat > /root/kibana-queries.txt << 'EOF'
Kibana常用查询
=============

# 查询特定IP的访问
client_ip: "192.168.1.100"

# 查询特定状态码
status: 500 OR status: 502 OR status: 503

# 查询慢请求
query_time: > 1

# 查询特定URL
request: "/api/*"

# 组合查询
status: 200 AND client_ip: "192.168.1.*"

# 范围查询
body_bytes_sent: [1000 TO 10000]

# 排除查询
NOT status: 200
EOF

风哥针对日志分析建议:

  • 配置索引生命周期管理
  • 设置合理的分片数量
  • 使用冷热数据分层
  • 配置告警规则
  • 定期清理旧索引

本文由风哥教程整理发布,仅用于学习测试使用,转载注明出处:http://www.fgedu.net.cn/10327.html

联系我们

在线咨询:点击这里给我发消息

微信号:itpux-com

工作日:9:30-18:30,节假日休息