1. 首页 > IT综合教程 > 正文

IT教程FG417-ELK日志分析监控

内容大纲

1. ELK日志分析监控概述

ELK(Elasticsearch、Logstash、Kibana)日志分析监控是现代IT运维的核心组件,它通过实时收集、分析和可视化日志数据,帮助运维人员快速发现和解决问题。ELK监控需要覆盖Elasticsearch集群、Logstash管道、Kibana界面以及整个日志处理流程。

ELK监控的核心目标包括:

  • 监控Elasticsearch集群健康状态和性能
  • 监控Logstash数据管道的吞吐量和延迟
  • 监控Kibana的可用性和响应时间
  • 监控日志收集和处理的全流程
  • 及时发现和处理异常情况
  • 确保日志数据的完整性和可查询性

更多学习教程www.fgedu.net.cn

2. 监控架构设计

2.1 监控系统架构

# Deploy the ELK monitoring stack
# (en-dashes in the original would break these commands: "–name" is not "--name")
# Deploy Prometheus
$ docker run -d --name prometheus \
    --network elk-monitoring \
    -p 9090:9090 \
    -v /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
    prom/prometheus:latest

# Deploy Grafana for visualization
$ docker run -d --name grafana \
    --network elk-monitoring \
    -p 3000:3000 \
    -v /data/grafana:/var/lib/grafana \
    grafana/grafana:latest

# Deploy the Elasticsearch exporter
$ docker run -d --name elasticsearch-exporter \
    --network elk-monitoring \
    -p 9114:9114 \
    -e ES_URI=http://elasticsearch:9200 \
    quay.io/prometheuscommunity/elasticsearch-exporter:latest

输出结果如下:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a1b2c3d4e5f6 prom/prometheus:latest "/bin/prometheus --c…" 5 seconds ago Up 4 seconds 0.0.0.0:9090->9090/tcp prometheus
f6e5d4c3b2a1 grafana/grafana:latest "/run.sh" 3 seconds ago Up 2 seconds 0.0.0.0:3000->3000/tcp grafana
1a2b3c4d5e6f quay.io/prometheuscommunity/elasticsearch-exporter "/bin/elasticsearch_…" 1 second ago Up 1 second 0.0.0.0:9114->9114/tcp elasticsearch-exporter

2.2 监控指标配置

# Write the Prometheus scrape configuration
$ cat > /data/prometheus/prometheus.yml << 'EOF'
global:
  scrape_interval: 15s
  evaluation_interval: 15s

alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

rule_files:
  - "elk_alert_rules.yml"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['fgedudb:9090']
  - job_name: 'elasticsearch'
    static_configs:
      - targets: ['elasticsearch-exporter:9114']
  - job_name: 'logstash'
    static_configs:
      - targets: ['logstash:9600']
  - job_name: 'kibana'
    static_configs:
      - targets: ['kibana:5601']
  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']
EOF

3. Elasticsearch监控

3.1 集群健康监控

# Elasticsearch cluster health monitoring script
$ cat > /usr/local/bin/es_cluster_monitor.sh << 'EOF'
#!/bin/bash
# Emit Prometheus-style gauges describing Elasticsearch cluster health.

# Elasticsearch connection settings
ES_HOST="fgedudb"
ES_PORT="9200"

# Fetch cluster health as JSON (quote the URL and all expansions)
CLUSTER_HEALTH=$(curl -s "http://$ES_HOST:$ES_PORT/_cluster/health")

# Extract the individual health fields
CLUSTER_STATUS=$(echo "$CLUSTER_HEALTH" | jq -r '.status')
NUMBER_OF_NODES=$(echo "$CLUSTER_HEALTH" | jq -r '.number_of_nodes')
NUMBER_OF_DATA_NODES=$(echo "$CLUSTER_HEALTH" | jq -r '.number_of_data_nodes')
ACTIVE_SHARDS=$(echo "$CLUSTER_HEALTH" | jq -r '.active_shards')
ACTIVE_PRIMARY_SHARDS=$(echo "$CLUSTER_HEALTH" | jq -r '.active_primary_shards')
RELOCATING_SHARDS=$(echo "$CLUSTER_HEALTH" | jq -r '.relocating_shards')
INITIALIZING_SHARDS=$(echo "$CLUSTER_HEALTH" | jq -r '.initializing_shards')
UNASSIGNED_SHARDS=$(echo "$CLUSTER_HEALTH" | jq -r '.unassigned_shards')

# Map the textual status to a numeric gauge (green=2, yellow=1, red=0, unknown=-1)
case "$CLUSTER_STATUS" in
  "green")  STATUS_VALUE=2 ;;
  "yellow") STATUS_VALUE=1 ;;
  "red")    STATUS_VALUE=0 ;;
  *)        STATUS_VALUE=-1 ;;
esac

# Print metrics in Prometheus exposition format
echo "es_cluster_status $STATUS_VALUE"
echo "es_cluster_nodes_total $NUMBER_OF_NODES"
echo "es_cluster_data_nodes_total $NUMBER_OF_DATA_NODES"
echo "es_cluster_active_shards $ACTIVE_SHARDS"
echo "es_cluster_active_primary_shards $ACTIVE_PRIMARY_SHARDS"
echo "es_cluster_relocating_shards $RELOCATING_SHARDS"
echo "es_cluster_initializing_shards $INITIALIZING_SHARDS"
echo "es_cluster_unassigned_shards $UNASSIGNED_SHARDS"
EOF
$ chmod +x /usr/local/bin/es_cluster_monitor.sh
输出结果如下:
es_cluster_status 2
es_cluster_nodes_total 3
es_cluster_data_nodes_total 3
es_cluster_active_shards 30
es_cluster_active_primary_shards 15
es_cluster_relocating_shards 0
es_cluster_initializing_shards 0
es_cluster_unassigned_shards 0

3.2 节点监控

# Elasticsearch per-node monitoring script
$ cat > /usr/local/bin/es_node_monitor.sh << 'EOF'
#!/bin/bash
# Emit per-node CPU, heap, disk and throughput metrics for every ES node.

# Elasticsearch connection settings (were undefined in the original script,
# so curl would have hit "http://:/_nodes/stats")
ES_HOST="fgedudb"
ES_PORT="9200"

# Fetch node statistics once; all fields are extracted from this payload
NODE_STATS=$(curl -s "http://$ES_HOST:$ES_PORT/_nodes/stats")

# Iterate over every node id in the response
for node in $(echo "$NODE_STATS" | jq -r '.nodes | keys[]'); do
  NODE_NAME=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].name")

  # CPU utilisation (%)
  CPU_PERCENT=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].os.cpu.percent")

  # JVM heap utilisation (%)
  MEM_USED_PERCENT=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].jvm.mem.heap_used_percent")

  # Disk utilisation (%) derived from available/total bytes
  DISK_AVAILABLE=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].fs.total.available_in_bytes")
  DISK_TOTAL=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].fs.total.total_in_bytes")
  DISK_USAGE_PERCENT=$((100 - (DISK_AVAILABLE * 100 / DISK_TOTAL)))

  # Cumulative indexing / search counters
  INDEXING_TOTAL=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].indices.indexing.index_total")
  SEARCH_TOTAL=$(echo "$NODE_STATS" | jq -r ".nodes[\"$node\"].indices.search.query_total")

  # Print metrics, defaulting to 0 when a field is missing
  echo "es_node_cpu_percent{node=\"$NODE_NAME\"} ${CPU_PERCENT:-0}"
  echo "es_node_heap_used_percent{node=\"$NODE_NAME\"} ${MEM_USED_PERCENT:-0}"
  echo "es_node_disk_usage_percent{node=\"$NODE_NAME\"} ${DISK_USAGE_PERCENT:-0}"
  echo "es_node_indexing_total{node=\"$NODE_NAME\"} ${INDEXING_TOTAL:-0}"
  echo "es_node_search_total{node=\"$NODE_NAME\"} ${SEARCH_TOTAL:-0}"
done
EOF
$ chmod +x /usr/local/bin/es_node_monitor.sh
输出结果如下:
es_node_cpu_percent{node="es-node-1"} 25.5
es_node_heap_used_percent{node="es-node-1"} 65.2
es_node_disk_usage_percent{node="es-node-1"} 45
es_node_indexing_total{node="es-node-1"} 1234567
es_node_search_total{node="es-node-1"} 987654
es_node_cpu_percent{node="es-node-2"} 28.3
es_node_heap_used_percent{node="es-node-2"} 62.8
es_node_disk_usage_percent{node="es-node-2"} 42
es_node_indexing_total{node="es-node-2"} 1234567
es_node_search_total{node="es-node-2"} 987654
es_node_cpu_percent{node="es-node-3"} 22.1
es_node_heap_used_percent{node="es-node-3"} 68.5
es_node_disk_usage_percent{node="es-node-3"} 48
es_node_indexing_total{node="es-node-3"} 1234567
es_node_search_total{node="es-node-3"} 987654

学习交流加群风哥微信: itpux-com

4. Logstash监控

4.1 管道监控

# Logstash pipeline monitoring script
$ cat > /usr/local/bin/logstash_pipeline_monitor.sh << 'EOF'
#!/bin/bash
# Emit per-pipeline event throughput and queue metrics
# from the Logstash node stats API.

# Logstash connection settings
LOGSTASH_HOST="fgedudb"
LOGSTASH_PORT="9600"

# Fetch pipeline statistics
PIPELINE_STATS=$(curl -s "http://$LOGSTASH_HOST:$LOGSTASH_PORT/_node/stats/pipelines")

# One metric set per pipeline
for pipeline in $(echo "$PIPELINE_STATS" | jq -r '.pipelines | keys[]'); do
  # Event counters
  EVENTS_IN=$(echo "$PIPELINE_STATS" | jq -r ".pipelines[\"$pipeline\"].events.in")
  EVENTS_OUT=$(echo "$PIPELINE_STATS" | jq -r ".pipelines[\"$pipeline\"].events.out")
  EVENTS_FILTERED=$(echo "$PIPELINE_STATS" | jq -r ".pipelines[\"$pipeline\"].events.filtered")

  # Cumulative processing time
  DURATION=$(echo "$PIPELINE_STATS" | jq -r ".pipelines[\"$pipeline\"].events.duration_in_millis")

  # Queue backlog
  QUEUE_SIZE=$(echo "$PIPELINE_STATS" | jq -r ".pipelines[\"$pipeline\"].queue.size")

  # Print metrics, defaulting to 0 when a field is missing
  echo "logstash_pipeline_events_in{pipeline=\"$pipeline\"} ${EVENTS_IN:-0}"
  echo "logstash_pipeline_events_out{pipeline=\"$pipeline\"} ${EVENTS_OUT:-0}"
  echo "logstash_pipeline_events_filtered{pipeline=\"$pipeline\"} ${EVENTS_FILTERED:-0}"
  echo "logstash_pipeline_duration_ms{pipeline=\"$pipeline\"} ${DURATION:-0}"
  echo "logstash_pipeline_queue_size{pipeline=\"$pipeline\"} ${QUEUE_SIZE:-0}"
done
EOF
$ chmod +x /usr/local/bin/logstash_pipeline_monitor.sh
输出结果如下:
logstash_pipeline_events_in{pipeline="main"} 1234567
logstash_pipeline_events_out{pipeline="main"} 1234567
logstash_pipeline_events_filtered{pipeline="main"} 1234567
logstash_pipeline_duration_ms{pipeline="main"} 12345678
logstash_pipeline_queue_size{pipeline="main"} 0

4.2 JVM监控

# Logstash JVM monitoring script
$ cat > /usr/local/bin/logstash_jvm_monitor.sh << 'EOF'
#!/bin/bash
# Emit Logstash JVM heap, thread and GC metrics.

# Logstash connection settings (were undefined in the original script)
LOGSTASH_HOST="fgedudb"
LOGSTASH_PORT="9600"

JVM_STATS=$(curl -s "http://$LOGSTASH_HOST:$LOGSTASH_PORT/_node/stats/jvm")

# The node stats payload nests JVM data under a top-level "jvm" key;
# the original jq paths omitted it and would have returned null.
HEAP_USED=$(echo "$JVM_STATS" | jq -r '.jvm.mem.heap_used_in_bytes')
HEAP_MAX=$(echo "$JVM_STATS" | jq -r '.jvm.mem.heap_max_in_bytes')

# Guard the percentage: avoid arithmetic errors on "null" or a zero max heap
if [[ "$HEAP_USED" =~ ^[0-9]+$ && "$HEAP_MAX" =~ ^[0-9]+$ && "$HEAP_MAX" -gt 0 ]]; then
  HEAP_PERCENT=$((HEAP_USED * 100 / HEAP_MAX))
else
  HEAP_PERCENT=0
fi

# Thread statistics
THREAD_COUNT=$(echo "$JVM_STATS" | jq -r '.jvm.threads.count')
PEAK_THREAD_COUNT=$(echo "$JVM_STATS" | jq -r '.jvm.threads.peak_count')

# GC statistics
OLD_GC_COUNT=$(echo "$JVM_STATS" | jq -r '.jvm.gc.collectors.old.collection_count')
OLD_GC_TIME=$(echo "$JVM_STATS" | jq -r '.jvm.gc.collectors.old.collection_time_in_millis')
YOUNG_GC_COUNT=$(echo "$JVM_STATS" | jq -r '.jvm.gc.collectors.young.collection_count')
YOUNG_GC_TIME=$(echo "$JVM_STATS" | jq -r '.jvm.gc.collectors.young.collection_time_in_millis')

# Print metrics
echo "logstash_jvm_heap_used_bytes $HEAP_USED"
echo "logstash_jvm_heap_max_bytes $HEAP_MAX"
echo "logstash_jvm_heap_used_percent $HEAP_PERCENT"
echo "logstash_jvm_threads_count $THREAD_COUNT"
echo "logstash_jvm_threads_peak_count $PEAK_THREAD_COUNT"
echo "logstash_jvm_old_gc_count $OLD_GC_COUNT"
echo "logstash_jvm_old_gc_time_ms $OLD_GC_TIME"
echo "logstash_jvm_young_gc_count $YOUNG_GC_COUNT"
echo "logstash_jvm_young_gc_time_ms $YOUNG_GC_TIME"
EOF
$ chmod +x /usr/local/bin/logstash_jvm_monitor.sh
输出结果如下:
logstash_jvm_heap_used_bytes 2147483648
logstash_jvm_heap_max_bytes 4294967296
logstash_jvm_heap_used_percent 50
logstash_jvm_threads_count 45
logstash_jvm_threads_peak_count 52
logstash_jvm_old_gc_count 12
logstash_jvm_old_gc_time_ms 12345
logstash_jvm_young_gc_count 1234
logstash_jvm_young_gc_time_ms 5678

5. Kibana监控

5.1 服务状态监控

# Kibana service status monitoring script
$ cat > /usr/local/bin/kibana_status_monitor.sh << 'EOF'
#!/bin/bash
# Emit Kibana overall/ES/plugin status gauges and API response time.

# Kibana connection settings
KIBANA_HOST="fgedudb"
KIBANA_PORT="5601"

KIBANA_STATUS=$(curl -s "http://$KIBANA_HOST:$KIBANA_PORT/api/status")

# Map a status level string to a numeric gauge value
# (available=2, degraded=1, unavailable=0, unknown=-1).
level_to_value() {
  case "$1" in
    "available")   echo 2 ;;
    "degraded")    echo 1 ;;
    "unavailable") echo 0 ;;
    *)             echo -1 ;;
  esac
}

# Overall service status
OVERALL_STATUS=$(echo "$KIBANA_STATUS" | jq -r '.status.overall.level')
STATUS_VALUE=$(level_to_value "$OVERALL_STATUS")

# Response time of the status endpoint itself
RESPONSE_TIME=$(curl -o /dev/null -s -w '%{time_total}\n' "http://$KIBANA_HOST:$KIBANA_PORT/api/status")

# Elasticsearch connectivity as reported by Kibana
# (the original fetched this but never reported it)
ES_STATUS=$(echo "$KIBANA_STATUS" | jq -r '.status.core.elasticsearch.level')

echo "kibana_status $STATUS_VALUE"
echo "kibana_response_time_seconds $RESPONSE_TIME"
echo "kibana_elasticsearch_status $(level_to_value "$ES_STATUS")"

# Per-plugin status gauges
for plugin in $(echo "$KIBANA_STATUS" | jq -r '.status.plugins | keys[]'); do
  PLUGIN_STATUS=$(echo "$KIBANA_STATUS" | jq -r ".status.plugins[\"$plugin\"].level")
  echo "kibana_plugin_status{plugin=\"$plugin\"} $(level_to_value "$PLUGIN_STATUS")"
done
EOF
$ chmod +x /usr/local/bin/kibana_status_monitor.sh
输出结果如下:
kibana_status 2
kibana_response_time_seconds 0.125
kibana_plugin_status{plugin="spaces"} 2
kibana_plugin_status{plugin="security"} 2
kibana_plugin_status{plugin="monitoring"} 2
kibana_plugin_status{plugin="reporting"} 2

5.2 性能监控

# Kibana performance monitoring script
$ cat > /usr/local/bin/kibana_performance_monitor.sh << 'EOF'
#!/bin/bash
# Collect request/connection counters from the Kibana metrics API plus
# process-level CPU/memory usage of the Kibana process.

# Kibana connection settings (were undefined in the original script)
KIBANA_HOST="fgedudb"
KIBANA_PORT="5601"

KIBANA_METRICS=$(curl -s "http://$KIBANA_HOST:$KIBANA_PORT/api/metrics")

# Request/connection counters
CONCURRENT_CONNECTIONS=$(echo "$KIBANA_METRICS" | jq -r '.concurrent_connections')
REQUESTS_TOTAL=$(echo "$KIBANA_METRICS" | jq -r '.requests.total')
REQUESTS_DISCONNECTS=$(echo "$KIBANA_METRICS" | jq -r '.requests.disconnects')

# Process RSS: ps column 6 is in KB — convert to bytes so the value matches
# the *_bytes metric name. head -1 guards against multiple matching processes.
PROCESS_MEMORY_KB=$(ps aux | grep "kibana" | grep -v grep | head -1 | awk '{print $6}')
PROCESS_MEMORY=$(( ${PROCESS_MEMORY_KB:-0} * 1024 ))

# Process CPU usage (%)
PROCESS_CPU=$(ps aux | grep "kibana" | grep -v grep | head -1 | awk '{print $3}')

# Print metrics, defaulting to 0 when a field is missing
echo "kibana_concurrent_connections ${CONCURRENT_CONNECTIONS:-0}"
echo "kibana_requests_total ${REQUESTS_TOTAL:-0}"
echo "kibana_requests_disconnects ${REQUESTS_DISCONNECTS:-0}"
echo "kibana_process_memory_bytes ${PROCESS_MEMORY:-0}"
echo "kibana_process_cpu_percent ${PROCESS_CPU:-0}"
EOF
$ chmod +x /usr/local/bin/kibana_performance_monitor.sh
输出结果如下:
kibana_concurrent_connections 25
kibana_requests_total 12345
kibana_requests_disconnects 5
kibana_process_memory_bytes 524288
kibana_process_cpu_percent 15.5

学习交流加群风哥QQ113257174

6. 集群监控

6.1 索引监控

# Elasticsearch per-index monitoring script
$ cat > /usr/local/bin/es_index_monitor.sh << 'EOF'
#!/bin/bash
# Emit per-index document count, store size and search/indexing counters.

# Elasticsearch connection settings (were undefined in the original script)
ES_HOST="fgedudb"
ES_PORT="9200"

# Fetch index-level statistics once
INDEX_STATS=$(curl -s "http://$ES_HOST:$ES_PORT/_stats")

for index in $(echo "$INDEX_STATS" | jq -r '.indices | keys[]'); do
  # Primary-shard document count and on-disk size
  DOC_COUNT=$(echo "$INDEX_STATS" | jq -r ".indices[\"$index\"].primaries.docs.count")
  STORE_SIZE=$(echo "$INDEX_STATS" | jq -r ".indices[\"$index\"].primaries.store.size_in_bytes")

  # Search counters
  SEARCH_TOTAL=$(echo "$INDEX_STATS" | jq -r ".indices[\"$index\"].primaries.search.query_total")
  SEARCH_TIME=$(echo "$INDEX_STATS" | jq -r ".indices[\"$index\"].primaries.search.query_time_in_millis")

  # Indexing counters
  INDEXING_TOTAL=$(echo "$INDEX_STATS" | jq -r ".indices[\"$index\"].primaries.indexing.index_total")
  INDEXING_TIME=$(echo "$INDEX_STATS" | jq -r ".indices[\"$index\"].primaries.indexing.index_time_in_millis")

  # Print metrics, defaulting to 0 when a field is missing
  echo "es_index_doc_count{index=\"$index\"} ${DOC_COUNT:-0}"
  echo "es_index_store_size_bytes{index=\"$index\"} ${STORE_SIZE:-0}"
  echo "es_index_search_total{index=\"$index\"} ${SEARCH_TOTAL:-0}"
  echo "es_index_search_time_ms{index=\"$index\"} ${SEARCH_TIME:-0}"
  echo "es_index_indexing_total{index=\"$index\"} ${INDEXING_TOTAL:-0}"
  echo "es_index_indexing_time_ms{index=\"$index\"} ${INDEXING_TIME:-0}"
done
EOF
$ chmod +x /usr/local/bin/es_index_monitor.sh
输出结果如下:
es_index_doc_count{index="logstash-2026.04.03"} 1234567
es_index_store_size_bytes{index="logstash-2026.04.03"} 12345678901
es_index_search_total{index="logstash-2026.04.03"} 98765
es_index_search_time_ms{index="logstash-2026.04.03"} 1234567
es_index_indexing_total{index="logstash-2026.04.03"} 1234567
es_index_indexing_time_ms{index="logstash-2026.04.03"} 2345678
es_index_doc_count{index="logstash-2026.04.02"} 2345678
es_index_store_size_bytes{index="logstash-2026.04.02"} 23456789012
es_index_search_total{index="logstash-2026.04.02"} 187654
es_index_search_time_ms{index="logstash-2026.04.02"} 2345678
es_index_indexing_total{index="logstash-2026.04.02"} 2345678
es_index_indexing_time_ms{index="logstash-2026.04.02"} 3456789

6.2 分片监控

# Elasticsearch shard monitoring script
$ cat > /usr/local/bin/es_shard_monitor.sh << 'EOF'
#!/bin/bash
# Count primary / replica / unassigned shards and shards per node.

# Elasticsearch connection settings (were undefined in the original script)
ES_HOST="fgedudb"
ES_PORT="9200"

# The URL MUST be quoted: in the original, the unquoted '&' in "?v&h=..."
# put curl in the background and ran "h=..." as a separate command.
SHARD_INFO=$(curl -s "http://$ES_HOST:$ES_PORT/_cat/shards?v&h=index,shard,prirep,state,docs,store,node")

PRIMARY_COUNT=0
REPLICA_COUNT=0
UNASSIGNED_COUNT=0

# Skip the header row, then classify each shard line
while IFS= read -r line; do
  STATE=$(echo "$line" | awk '{print $4}')
  PRIREP=$(echo "$line" | awk '{print $3}')

  # 'p' marks a primary shard; everything else is a replica
  if [ "$PRIREP" = "p" ]; then
    PRIMARY_COUNT=$((PRIMARY_COUNT + 1))
  else
    REPLICA_COUNT=$((REPLICA_COUNT + 1))
  fi

  if [ "$STATE" = "UNASSIGNED" ]; then
    UNASSIGNED_COUNT=$((UNASSIGNED_COUNT + 1))
  fi
done <<< "$(echo "$SHARD_INFO" | tail -n +2)"

echo "es_shards_primary_total $PRIMARY_COUNT"
echo "es_shards_replica_total $REPLICA_COUNT"
echo "es_shards_unassigned_total $UNASSIGNED_COUNT"

# Shard distribution per node (grep -c replaces grep | wc -l)
for node in $(curl -s "http://$ES_HOST:$ES_PORT/_cat/nodes?h=name"); do
  NODE_SHARDS=$(curl -s "http://$ES_HOST:$ES_PORT/_cat/shards" | grep -c -- "$node")
  echo "es_shards_on_node{node=\"$node\"} $NODE_SHARDS"
done
EOF
$ chmod +x /usr/local/bin/es_shard_monitor.sh
输出结果如下:
es_shards_primary_total 15
es_shards_replica_total 15
es_shards_unassigned_total 0
es_shards_on_node{node="es-node-1"} 10
es_shards_on_node{node="es-node-2"} 10
es_shards_on_node{node="es-node-3"} 10

7. 性能监控

7.1 查询性能监控

# Query performance monitoring script
$ cat > /usr/local/bin/es_query_performance_monitor.sh << 'EOF'
#!/bin/bash
# Report slow-query counts from the slow log plus query-cache statistics.

# Elasticsearch connection settings (were undefined in the original script)
ES_HOST="fgedudb"
ES_PORT="9200"

SLOW_LOG="/var/log/elasticsearch/elasticsearch_slow_queries.log"

if [ -f "$SLOW_LOG" ]; then
  TODAY=$(date +%Y-%m-%d)

  # grep -c counts matching lines directly (no extra wc process)
  SLOW_QUERY_COUNT=$(grep -c "$TODAY" "$SLOW_LOG")
  SEARCH_SLOW=$(grep "$TODAY" "$SLOW_LOG" | grep -c "search")
  INDEX_SLOW=$(grep "$TODAY" "$SLOW_LOG" | grep -c "index")

  echo "es_slow_queries_total $SLOW_QUERY_COUNT"
  echo "es_slow_search_queries $SEARCH_SLOW"
  echo "es_slow_index_queries $INDEX_SLOW"
else
  # No slow log yet — report zeros rather than failing
  echo "es_slow_queries_total 0"
  echo "es_slow_search_queries 0"
  echo "es_slow_index_queries 0"
fi

# Query cache statistics
CACHE_STATS=$(curl -s "http://$ES_HOST:$ES_PORT/_stats/query_cache")
CACHE_HIT_COUNT=$(echo "$CACHE_STATS" | jq -r '._all.primaries.query_cache.hit_count')
CACHE_MISS_COUNT=$(echo "$CACHE_STATS" | jq -r '._all.primaries.query_cache.miss_count')
CACHE_SIZE=$(echo "$CACHE_STATS" | jq -r '._all.primaries.query_cache.memory_size_in_bytes')

echo "es_query_cache_hit_count ${CACHE_HIT_COUNT:-0}"
echo "es_query_cache_miss_count ${CACHE_MISS_COUNT:-0}"
echo "es_query_cache_size_bytes ${CACHE_SIZE:-0}"
EOF
$ chmod +x /usr/local/bin/es_query_performance_monitor.sh
输出结果如下:
es_slow_queries_total 5
es_slow_search_queries 3
es_slow_index_queries 2
es_query_cache_hit_count 12345
es_query_cache_miss_count 5678
es_query_cache_size_bytes 1048576

7.2 索引性能监控

# Indexing performance monitoring script
$ cat > /usr/local/bin/es_index_performance_monitor.sh << 'EOF'
#!/bin/bash
# Report cluster-wide indexing, refresh and flush performance counters.

# Elasticsearch connection settings (were undefined in the original script)
ES_HOST="fgedudb"
ES_PORT="9200"

# Request all three metric groups: "_stats/indexing" alone does not include
# the refresh/flush sections the script reads below.
INDEX_STATS=$(curl -s "http://$ES_HOST:$ES_PORT/_stats/indexing,refresh,flush")

# Indexing counters
INDEX_TOTAL=$(echo "$INDEX_STATS" | jq -r '._all.primaries.indexing.index_total')
INDEX_TIME=$(echo "$INDEX_STATS" | jq -r '._all.primaries.indexing.index_time_in_millis')
INDEX_FAILED=$(echo "$INDEX_STATS" | jq -r '._all.primaries.indexing.index_failed')

# Average time per indexed document; guard against "null"/zero totals,
# which would crash the numeric comparison when the API call fails
if [[ "$INDEX_TOTAL" =~ ^[0-9]+$ ]] && [ "$INDEX_TOTAL" -gt 0 ]; then
  AVG_INDEX_TIME=$((INDEX_TIME / INDEX_TOTAL))
else
  AVG_INDEX_TIME=0
fi

# Refresh counters
REFRESH_TOTAL=$(echo "$INDEX_STATS" | jq -r '._all.primaries.refresh.total')
REFRESH_TIME=$(echo "$INDEX_STATS" | jq -r '._all.primaries.refresh.total_time_in_millis')

# Flush counters
FLUSH_TOTAL=$(echo "$INDEX_STATS" | jq -r '._all.primaries.flush.total')
FLUSH_TIME=$(echo "$INDEX_STATS" | jq -r '._all.primaries.flush.total_time_in_millis')

echo "es_indexing_total $INDEX_TOTAL"
echo "es_indexing_time_ms $INDEX_TIME"
echo "es_indexing_failed_total $INDEX_FAILED"
echo "es_indexing_avg_time_ms $AVG_INDEX_TIME"
echo "es_refresh_total $REFRESH_TOTAL"
echo "es_refresh_time_ms $REFRESH_TIME"
echo "es_flush_total $FLUSH_TOTAL"
echo "es_flush_time_ms $FLUSH_TIME"
EOF
$ chmod +x /usr/local/bin/es_index_performance_monitor.sh
输出结果如下:
es_indexing_total 1234567
es_indexing_time_ms 12345678
es_indexing_failed_total 0
es_indexing_avg_time_ms 10
es_refresh_total 12345
es_refresh_time_ms 123456
es_flush_total 1234
es_flush_time_ms 123456

风哥提示:性能监控是ELK监控的重要组成部分,需要实时监控查询和索引的性能指标,及时发现性能瓶颈并进行优化。

8. 告警管理

8.1 告警规则配置

# Write the ELK alerting rules for Prometheus
# (the original used en-dashes for YAML list markers and curly quotes,
# which makes the file unparseable)
$ cat > /data/prometheus/elk_alert_rules.yml << 'EOF'
groups:
  - name: elk_alerts
    rules:
      # Cluster status alerts
      - alert: ElasticsearchClusterRed
        expr: es_cluster_status == 0
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Elasticsearch集群状态为红色"
          description: "Elasticsearch集群状态为红色,部分主分片不可用"

      - alert: ElasticsearchClusterYellow
        expr: es_cluster_status == 1
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Elasticsearch集群状态为黄色"
          description: "Elasticsearch集群状态为黄色,部分副本分片不可用"

      # Node resource alerts
      - alert: ElasticsearchHighHeapUsage
        expr: es_node_heap_used_percent > 85
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Elasticsearch节点堆内存使用率过高"
          description: "节点 {{ $labels.node }} 堆内存使用率 {{ $value }}%"

      - alert: ElasticsearchHighDiskUsage
        expr: es_node_disk_usage_percent > 85
        for: 10m
        labels:
          severity: critical
        annotations:
          summary: "Elasticsearch节点磁盘使用率过高"
          description: "节点 {{ $labels.node }} 磁盘使用率 {{ $value }}%"

      # Shard alerts
      - alert: ElasticsearchUnassignedShards
        expr: es_cluster_unassigned_shards > 0
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "存在未分配的分片"
          description: "存在 {{ $value }} 个未分配的分片"

      # Logstash alerts
      - alert: LogstashHighHeapUsage
        expr: logstash_jvm_heap_used_percent > 85
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Logstash堆内存使用率过高"
          description: "Logstash堆内存使用率 {{ $value }}%"

      - alert: LogstashPipelineBlocked
        expr: logstash_pipeline_queue_size > 1000
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Logstash管道队列积压"
          description: "管道 {{ $labels.pipeline }} 队列积压 {{ $value }} 条消息"

      # Kibana alerts
      - alert: KibanaDown
        expr: kibana_status == 0
        for: 2m
        labels:
          severity: critical
        annotations:
          summary: "Kibana服务不可用"
          description: "Kibana服务不可用,请立即检查"

      - alert: KibanaSlowResponse
        expr: kibana_response_time_seconds > 5
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Kibana响应时间过长"
          description: "Kibana响应时间 {{ $value }}秒"
EOF

8.2 告警通知配置

# Configure Alertmanager notification routing
$ cat > /data/alertmanager/alertmanager.yml << 'EOF'
global:
  resolve_timeout: 5m
  smtp_smarthost: 'smtp.fgedu.net.cn:587'
  smtp_from: 'elk-alert@fgedu.net.cn'
  smtp_auth_username: 'elk-alert@fgedu.net.cn'
  smtp_auth_password: 'password'

route:
  group_by: ['alertname', 'severity']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 4h
  receiver: 'elk-team'
  routes:
    - match:
        severity: critical
      receiver: 'elk-critical'
    - match:
        severity: warning
      receiver: 'elk-warning'

receivers:
  - name: 'elk-team'
    email_configs:
      - to: 'elk-team@fgedu.net.cn'
        send_resolved: true
  - name: 'elk-critical'
    email_configs:
      - to: 'elk-team@fgedu.net.cn,manager@fgedu.net.cn'
        send_resolved: true
    webhook_configs:
      - url: 'http://webhook-server:5000/alert'
        send_resolved: true
  - name: 'elk-warning'
    email_configs:
      - to: 'elk-team@fgedu.net.cn'
        send_resolved: true

inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname']
EOF
# NOTE(review): the SMTP password is hardcoded in plain text here —
# load it from a secrets file or environment in production.

9. 日志分析

9.1 日志收集监控

# Log collection monitoring script
$ cat > /usr/local/bin/log_collection_monitor.sh << 'EOF'
#!/bin/bash
# Report log directory sizes, file counts and today's growth for ELK logs.

# Emit size/count metrics for one log directory.
#   $1 - directory to measure
#   $2 - value for the metric's "type" label
monitor_log_size() {
  local log_dir=$1
  local log_type=$2

  if [ -d "$log_dir" ]; then
    # du -s reports KB; -sb (GNU du) reports bytes, matching the
    # elk_log_size_bytes metric name (the original emitted KB as "bytes")
    local size_bytes file_count
    size_bytes=$(du -sb "$log_dir" | awk '{print $1}')
    file_count=$(find "$log_dir" -type f | wc -l)

    echo "elk_log_size_bytes{type=\"$log_type\"} $size_bytes"
    echo "elk_log_file_count{type=\"$log_type\"} $file_count"
  fi
}

# Measure each component's log directory
monitor_log_size "/var/log/elasticsearch" "elasticsearch"
monitor_log_size "/var/log/logstash" "logstash"
monitor_log_size "/var/log/kibana" "kibana"
monitor_log_size "/var/log/nginx" "nginx"

# Per-day growth: total KB of files modified since midnight today
TODAY=$(date +%Y-%m-%d)
for log_type in elasticsearch logstash kibana; do
  LOG_DIR="/var/log/$log_type"
  if [ -d "$LOG_DIR" ]; then
    TODAY_SIZE=$(find "$LOG_DIR" -type f -newermt "$TODAY" -exec du -s {} + | awk '{sum+=$1} END {print sum}')
    echo "elk_log_growth_kb{type=\"$log_type\"} ${TODAY_SIZE:-0}"
  fi
done
EOF
$ chmod +x /usr/local/bin/log_collection_monitor.sh
输出结果如下:
elk_log_size_bytes{type="elasticsearch"} 12345678
elk_log_file_count{type="elasticsearch"} 15
elk_log_size_bytes{type="logstash"} 5678901
elk_log_file_count{type="logstash"} 10
elk_log_size_bytes{type="kibana"} 1234567
elk_log_file_count{type="kibana"} 5
elk_log_size_bytes{type="nginx"} 2345678
elk_log_file_count{type="nginx"} 8
elk_log_growth_kb{type="elasticsearch"} 123456
elk_log_growth_kb{type="logstash"} 56789
elk_log_growth_kb{type="kibana"} 12345

9.2 日志分析统计

# Log analysis statistics script
$ cat > /usr/local/bin/log_analysis_stats.sh << 'EOF'
#!/bin/bash
# Count ERROR/WARN lines in ES/Logstash/Kibana logs and today's indexed docs.

# Count case-insensitive matches of pattern $1 in file $2.
# Prints 0 when the file does not exist (the original let grep error out).
count_matches() {
  local pattern=$1 file=$2
  if [ -f "$file" ]; then
    grep -ic "$pattern" "$file"
  else
    echo 0
  fi
}

# Elasticsearch log
ES_ERROR_COUNT=$(count_matches "error" /var/log/elasticsearch/elasticsearch.log)
ES_WARN_COUNT=$(count_matches "warn" /var/log/elasticsearch/elasticsearch.log)

# Logstash log
LS_ERROR_COUNT=$(count_matches "error" /var/log/logstash/logstash-plain.log)
LS_WARN_COUNT=$(count_matches "warn" /var/log/logstash/logstash-plain.log)

# Kibana log
KB_ERROR_COUNT=$(count_matches "error" /var/log/kibana/kibana.log)
KB_WARN_COUNT=$(count_matches "warn" /var/log/kibana/kibana.log)

echo "elk_es_log_errors_total $ES_ERROR_COUNT"
echo "elk_es_log_warnings_total $ES_WARN_COUNT"
echo "elk_logstash_log_errors_total $LS_ERROR_COUNT"
echo "elk_logstash_log_warnings_total $LS_WARN_COUNT"
echo "elk_kibana_log_errors_total $KB_ERROR_COUNT"
echo "elk_kibana_log_warnings_total $KB_WARN_COUNT"

# Documents indexed today across all logstash-* indices
TODAY=$(date +%Y-%m-%d)
TODAY_DOCS=$(curl -s "http://fgedudb:9200/logstash-*/_count?q=@timestamp:$TODAY" | jq -r '.count')
echo "elk_docs_processed_today ${TODAY_DOCS:-0}"
EOF
$ chmod +x /usr/local/bin/log_analysis_stats.sh
输出结果如下:
elk_es_log_errors_total 12
elk_es_log_warnings_total 25
elk_logstash_log_errors_total 5
elk_logstash_log_warnings_total 15
elk_kibana_log_errors_total 3
elk_kibana_log_warnings_total 8
elk_docs_processed_today 1234567

更多学习教程公众号风哥教程itpux_com

10. 最佳实践

10.1 监控系统设计原则

生产环境风哥建议:
– 监控系统应独立于ELK系统部署,避免单点故障
– 采用多层次监控架构,覆盖集群、节点、索引和查询
– 监控数据应实时采集和存储,支持历史数据分析
– 告警规则应根据业务重要性分级设置
– 定期验证监控系统的有效性

10.2 监控指标选择

# 关键监控指标清单
# 1. 集群监控指标
– 集群状态(Cluster Status)
– 节点数量(Number of Nodes)
– 分片状态(Shard Status)
– 索引数量(Number of Indices)

# 2. 节点监控指标
– CPU使用率(CPU Usage)
– 内存使用率(Memory Usage)
– 磁盘使用率(Disk Usage)
– 网络I/O(Network I/O)

# 3. 性能监控指标
– 查询延迟(Query Latency)
– 索引延迟(Index Latency)
– 查询吞吐量(Query Throughput)
– 索引吞吐量(Index Throughput)

# 4. 资源监控指标
– JVM堆内存(JVM Heap)
– 线程池状态(Thread Pool Status)
– 缓存命中率(Cache Hit Rate)
– GC统计(GC Statistics)

10.3 告警管理最佳实践

生产环境风哥建议:
– 告警应分级管理,区分严重程度和响应时间
– 避免告警风暴,合理设置告警阈值和持续时间
– 告警通知应多渠道发送,包括邮件、短信、电话等
– 告警应包含详细的上下文信息,便于快速定位问题
– 定期审查和优化告警规则,减少误报和漏报

10.4 监控数据管理

# Monitoring data management script
$ cat > /usr/local/bin/monitor_data_management.sh << 'EOF'
#!/bin/bash
# Clean up expired monitoring data and back up monitoring configuration.

# Days of monitoring data to keep
RETENTION_DAYS=90

# Delete data and log files older than the retention window.
clean_old_data() {
  echo "开始清理 $RETENTION_DAYS 天前的监控数据..."

  # Prometheus / Grafana data
  find /data/prometheus/data -type f -mtime +"$RETENTION_DAYS" -delete
  find /data/grafana/data -type f -mtime +"$RETENTION_DAYS" -delete

  # ELK component logs
  find /var/log/elasticsearch -type f -name "*.log" -mtime +"$RETENTION_DAYS" -delete
  find /var/log/logstash -type f -name "*.log" -mtime +"$RETENTION_DAYS" -delete
  find /var/log/kibana -type f -name "*.log" -mtime +"$RETENTION_DAYS" -delete

  echo "监控数据清理完成"
}

# Copy monitoring and ELK configuration into a dated backup directory.
backup_monitor_data() {
  local backup_dir
  backup_dir="/backup/elk-monitor-$(date +%Y%m%d)"
  mkdir -p "$backup_dir"

  echo "开始备份监控数据到 $backup_dir..."

  # Monitoring configuration (all paths quoted against spaces)
  cp -r /data/prometheus/*.yml "$backup_dir"/
  cp -r /data/grafana/dashboards "$backup_dir"/
  cp -r /data/prometheus/elk_alert_rules.yml "$backup_dir"/

  # ELK configuration
  cp -r /etc/elasticsearch "$backup_dir"/
  cp -r /etc/logstash "$backup_dir"/
  cp -r /etc/kibana "$backup_dir"/

  echo "监控数据备份完成"
}

# Run both management tasks
clean_old_data
backup_monitor_data
EOF
$ chmod +x /usr/local/bin/monitor_data_management.sh
输出结果如下:
开始清理 90 天前的监控数据…
监控数据清理完成
开始备份监控数据到 /backup/elk-monitor-20260403…
监控数据备份完成

10.5 监控系统维护

# Monitoring-system health check
# (curly quotes in the original echo statements would corrupt the script)
$ cat > /usr/local/bin/monitor_health_check.sh << 'EOF'
#!/bin/bash
# Probe each monitoring component's health endpoint and emit a 0/1 gauge.

# All components run on the same monitoring host
MONITOR_HOST="fgedudb"

# Prometheus readiness endpoint
check_prometheus() {
  if curl -f -s "http://$MONITOR_HOST:9090/-/healthy" > /dev/null; then
    echo "prometheus_status 1"
  else
    echo "prometheus_status 0"
  fi
}

# Grafana health API
check_grafana() {
  if curl -f -s "http://$MONITOR_HOST:3000/api/health" > /dev/null; then
    echo "grafana_status 1"
  else
    echo "grafana_status 0"
  fi
}

# Alertmanager readiness endpoint
check_alertmanager() {
  if curl -f -s "http://$MONITOR_HOST:9093/-/healthy" > /dev/null; then
    echo "alertmanager_status 1"
  else
    echo "alertmanager_status 0"
  fi
}

# Elasticsearch exporter health endpoint
check_es_exporter() {
  if curl -f -s "http://$MONITOR_HOST:9114/health" > /dev/null; then
    echo "elasticsearch_exporter_status 1"
  else
    echo "elasticsearch_exporter_status 0"
  fi
}

# Run all checks
check_prometheus
check_grafana
check_alertmanager
check_es_exporter
EOF

$ chmod +x /usr/local/bin/monitor_health_check.sh

输出结果如下:
prometheus_status 1
grafana_status 1
alertmanager_status 1
elasticsearch_exporter_status 1

10.6 ELK监控仪表板

# Create the Grafana dashboard definition
$ cat > /data/grafana/dashboards/elk-monitoring.json << 'EOF'
{
  "dashboard": {
    "title": "ELK监控仪表板",
    "panels": [
      {
        "title": "集群状态",
        "type": "stat",
        "targets": [
          { "expr": "es_cluster_status", "legendFormat": "状态" }
        ]
      },
      {
        "title": "节点资源使用",
        "type": "graph",
        "targets": [
          { "expr": "es_node_heap_used_percent", "legendFormat": "{{node}} 堆内存" },
          { "expr": "es_node_disk_usage_percent", "legendFormat": "{{node}} 磁盘" }
        ]
      },
      {
        "title": "索引性能",
        "type": "graph",
        "targets": [
          { "expr": "es_indexing_total", "legendFormat": "索引总数" },
          { "expr": "es_search_total", "legendFormat": "查询总数" }
        ]
      },
      {
        "title": "Logstash管道",
        "type": "graph",
        "targets": [
          { "expr": "logstash_pipeline_events_in", "legendFormat": "输入事件" },
          { "expr": "logstash_pipeline_events_out", "legendFormat": "输出事件" }
        ]
      },
      {
        "title": "Kibana状态",
        "type": "stat",
        "targets": [
          { "expr": "kibana_status", "legendFormat": "状态" },
          { "expr": "kibana_response_time_seconds", "legendFormat": "响应时间" }
        ]
      }
    ]
  }
}
EOF
# NOTE(review): "es_search_total" is not emitted by any exporter script in this
# tutorial (only es_node_search_total / es_index_search_total exist) — confirm
# the intended metric name before deploying the dashboard.
生产环境风哥建议:
– 定期进行ELK演练,验证监控系统的有效性
– 监控系统本身应具备高可用性,避免单点故障
– 监控数据应定期备份,确保数据安全
– 监控指标应根据业务需求持续优化和调整
– 告警规则应定期审查,确保告警的准确性和有效性

author:www.itpux.com

本文由风哥教程整理发布,仅用于学习测试使用,转载注明出处:http://www.fgedu.net.cn/10327.html

联系我们

在线咨询:点击这里给我发消息

微信号:itpux-com

工作日:9:30-18:30,节假日休息