1. 首页 > IT综合教程 > 正文

IT教程FG341-Linux系统日志管理

一、Linux日志系统概述

Linux系统日志是运维管理的重要基础,通过日志可以追踪系统事件、诊断故障、审计安全事件。Linux系统主要使用rsyslog和journald两种日志系统,它们各有特点,可以配合使用。

在FGedu企业的Linux服务器管理中,我们建立了完善的日志管理体系,包括日志收集、存储、分析和告警等环节。有效的日志管理能够帮助运维团队快速定位问题、满足合规要求、优化系统性能。

1.1 日志文件位置

Linux系统日志主要存储在/var/log目录下,不同的日志文件记录不同类型的信息。

# 常见日志文件位置
/var/log/messages # 系统主日志文件(RHEL/CentOS)
/var/log/syslog # 系统主日志文件(Debian/Ubuntu)
/var/log/secure # 安全相关日志(登录、认证)
/var/log/auth.log # 认证日志(Debian/Ubuntu)
/var/log/cron # 定时任务日志
/var/log/maillog # 邮件服务日志
/var/log/boot.log # 系统启动日志
/var/log/dmesg # 内核环形缓冲区日志
/var/log/yum.log # YUM包管理日志
/var/log/apt/ # APT包管理日志目录
/var/log/audit/ # 审计日志目录

# 查看日志文件
$ ls -la /var/log/
total 123456
drwxr-xr-x. 8 root root 4096 Apr 3 10:00 .
drwxr-xr-x. 18 root root 4096 Mar 1 08:00 ..
-rw-------. 1 root root 123456 Apr 3 10:00 audit
-rw-r--r--. 1 root root 12345 Apr 3 09:55 boot.log
-rw-------. 1 root root 12345 Apr 3 10:00 cron
-rw-r-----. 1 root root 12345 Apr 3 10:00 maillog
-rw-------. 1 root root 123456 Apr 3 10:00 messages
-rw-------. 1 root root 12345 Apr 3 10:00 secure

# 查看日志文件大小
$ du -sh /var/log/*
125M /var/log/audit
12M /var/log/boot.log
8.5M /var/log/cron
15M /var/log/maillog
120M /var/log/messages
10M /var/log/secure

二、rsyslog配置管理

2.1 rsyslog基础配置

rsyslog是Linux系统默认的日志服务,支持日志的接收、处理和转发。

# rsyslog主配置文件
# /etc/rsyslog.conf

# 加载模块
module(load="imuxsock") # 本地系统日志
module(load="imjournal") # 从journald读取日志
module(load="imklog") # 内核日志
module(load="imudp") # UDP输入模块
module(load="imtcp") # TCP输入模块

# 全局指令
global(
  workDirectory="/var/lib/rsyslog"
  maxMessageSize="64k"
)

# 模板定义
template(name="TraditionalFormat" type="string"
  string="%timegenerated% %HOSTNAME% %syslogtag%%msg:::drop-last-lf%\n"
)

template(name="DynamicFile" type="string"
  string="/var/log/remote/%fromhost-ip%/%$YEAR%-%$MONTH%-%$DAY%.log"
)

# 规则定义(设施.优先级 动作)
# 格式:facility.priority action

# 所有消息
*.info;mail.none;authpriv.none;cron.none /var/log/messages

# 认证相关
authpriv.* /var/log/secure

# 邮件服务
mail.* -/var/log/maillog

# 定时任务
cron.* /var/log/cron

# 紧急消息
*.emerg :omusrmsg:*

# 检查rsyslog配置
$ rsyslogd -N1
rsyslogd: version 8.2102.0-106.el8, config validation run (level 1), master config /etc/rsyslog.conf
rsyslogd: End of config validation run. Bye.

# 重启rsyslog服务
$ systemctl restart rsyslog
$ systemctl status rsyslog
● rsyslog.service – System Logging Service
Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled)
Active: active (running) since Fri 2026-04-03 10:00:00 CST; 5s ago
Main PID: 12345 (rsyslogd)
Tasks: 3 (limit: 12345)
Memory: 5.5M
CGroup: /system.slice/rsyslog.service
└─12345 /usr/sbin/rsyslogd -n

2.2 rsyslog高级配置

配置rsyslog实现日志过滤、转发和集中存储。

# 日志过滤配置
# /etc/rsyslog.d/filter.conf

# 过滤特定程序日志
if $programname == 'nginx' then {
  action(type="omfile" file="/var/log/nginx/access.log")
  stop
}

# 过滤包含特定内容的日志
if $msg contains 'error' then {
  action(type="omfile" file="/var/log/error.log")
}

# 过滤特定优先级
if $syslogseverity <= 3 then {
  action(type="omfile" file="/var/log/critical.log")
}

# 日志转发配置
# /etc/rsyslog.d/forward.conf

# 转发到远程日志服务器(TCP)
*.* @@logserver.fgedu.net.cn:514

# 转发到远程日志服务器(UDP)
*.* @logserver.fgedu.net.cn:514

# 使用RELP协议转发
*.* action(type="omrelp" target="logserver.fgedu.net.cn" port="20514")

# 日志服务器配置
# /etc/rsyslog.d/server.conf

# 加载输入模块
module(load="imudp")
input(type="imudp" port="514")

module(load="imtcp")
input(type="imtcp" port="514")

module(load="imrelp")
input(type="imrelp" port="20514")

# 使用模板存储远程日志
$template RemoteLogs,"/var/log/remote/%fromhost-ip%/%$YEAR%-%$MONTH%-%$DAY%.log"
*.* ?RemoteLogs

# 测试日志转发
$ logger -p local0.info "Test message from fgedu-server01"

# 在日志服务器上查看
$ tail -f /var/log/remote/10.0.1.11/2026-04-03.log
Apr  3 10:00:00 fgedu-server01 root: Test message from fgedu-server01

三、journald日志管理

3.1 journald基础

journald是systemd的日志组件,采用二进制格式存储日志,支持索引和快速查询。

# journald配置文件
# /etc/systemd/journald.conf

[Journal]
# 日志存储位置
Storage=persistent

# 压缩设置
Compress=yes

# 日志文件大小限制
SystemMaxUse=1G
SystemMaxFileSize=100M

# 日志保留时间
MaxRetentionSec=1month

# 日志转发到syslog
ForwardToSyslog=yes

# 日志转发到控制台
ForwardToConsole=no

# 重启journald
$ systemctl restart systemd-journald

# 查看日志存储位置
$ ls -la /var/log/journal/
total 4.0K
drwxr-xr-x. 2 root root 4.0K Apr 3 10:00 abc123456789…

# 查看日志文件
$ journalctl --disk-usage
Archived and active journals take up 256.0M on disk.

# 查看所有日志
$ journalctl
-- Logs begin at Mon 2026-03-01 08:00:00 CST, end at Fri 2026-04-03 10:00:00 CST. --
Apr 03 10:00:00 fgedu-server01 systemd[1]: Started Session 123 of user root.
Apr 03 10:00:01 fgedu-server01 sshd[12345]: Accepted publickey for root from 10.0.1.100 port 54321 ssh2

# 实时查看日志
$ journalctl -f
-- Logs begin at Fri 2026-04-03 10:00:00 CST. --
Apr 03 10:00:00 fgedu-server01 systemd[1]: Starting Daily Cleanup…

# 查看最近的日志
$ journalctl -n 20
Apr 03 10:00:00 fgedu-server01 systemd[1]: Started Daily Cleanup.
Apr 03 10:00:01 fgedu-server01 systemd[1]: Starting Daily man-db regeneration…

3.2 journalctl高级查询

journalctl提供了丰富的查询选项,可以精确过滤日志内容。

# 按时间过滤
$ journalctl --since "2026-04-03 09:00:00"
$ journalctl --since "09:00" --until "10:00"
$ journalctl --since yesterday
$ journalctl --since "1 hour ago"

# 按服务过滤
$ journalctl -u nginx.service
$ journalctl -u docker.service -u kubelet.service

# 按优先级过滤
$ journalctl -p err
$ journalctl -p warning..err

# 按进程过滤
$ journalctl _PID=12345

# 按用户过滤
$ journalctl _UID=0

# 按可执行文件过滤
$ journalctl /usr/sbin/sshd

# 按主机过滤
$ journalctl _HOSTNAME=fgedu-server01

# 组合过滤
$ journalctl -u nginx.service -p err --since today

# 以JSON格式输出
$ journalctl -u nginx.service -o json | jq .

# 以详细格式输出
$ journalctl -u nginx.service -o verbose

# 查看启动日志
$ journalctl -b

# 查看上一次启动日志
$ journalctl -b -1

# 查看内核日志
$ journalctl -k

# 持续监控特定服务
$ journalctl -u nginx.service -f

# 导出日志
$ journalctl --since "2026-04-01" > /tmp/april-logs.txt
$ journalctl -u nginx.service --since today -o json > nginx-logs.json

# 验证日志完整性
$ journalctl --verify
PASS: /var/log/journal/abc123/system.journal

四、日志轮转配置

4.1 logrotate配置

logrotate是Linux系统的日志轮转工具,可以自动压缩、删除和轮转日志文件。

# logrotate主配置文件
# /etc/logrotate.conf

# 全局配置
weekly # 每周轮转
rotate 4 # 保留4个备份
create # 创建新日志文件
dateext # 使用日期作为后缀
compress # 压缩旧日志
delaycompress # 延迟压缩

# 包含子配置目录
include /etc/logrotate.d

# 单个日志文件配置
/var/log/wtmp {
monthly
create 0664 root utmp
minsize 1M
rotate 1
}

# 应用日志轮转配置
# /etc/logrotate.d/fgedu-app

/var/log/fgedu/*.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 0644 app app
dateext
dateformat -%Y%m%d
sharedscripts
postrotate
/usr/bin/systemctl reload nginx > /dev/null 2>&1 || true
endscript
}

# Nginx日志轮转
# /etc/logrotate.d/nginx

/var/log/nginx/*.log {
daily
rotate 14
compress
delaycompress
missingok
notifempty
create 0640 nginx adm
sharedscripts
postrotate
[ -f /var/run/nginx.pid ] && kill -USR1 `cat /var/run/nginx.pid`
endscript
}

# 手动执行logrotate
$ logrotate -vf /etc/logrotate.conf
reading config file /etc/logrotate.conf
including /etc/logrotate.d
reading config file nginx

Handling 1 logs

rotating pattern: /var/log/nginx/*.log forced from command line (14 rotations)
empty log files are rotated, old logs are removed
considering log /var/log/nginx/access.log
log needs rotating
rotating log /var/log/nginx/access.log, log->rotateCount is 14
dateext suffix '-20260403'
renaming /var/log/nginx/access.log to /var/log/nginx/access.log-20260403
creating new /var/log/nginx/access.log mode = 0640 uid = 996 gid = 4
running postrotate script

# 测试配置
$ logrotate -d /etc/logrotate.d/nginx
WARNING: logrotate in debug mode does nothing except printing debug messages! Consider using verbose mode (-v) instead of this.

rotating pattern: /var/log/nginx/*.log weekly (14 rotations)
empty log files are rotated, old logs are removed

五、日志分析方法

5.1 常用日志分析命令

掌握日志分析命令可以快速定位问题和提取有用信息。

# 基础日志查看
$ tail -f /var/log/messages
$ head -n 50 /var/log/messages
$ cat /var/log/messages | less

# 搜索关键字
$ grep "error" /var/log/messages
$ grep -i "error\|warning" /var/log/messages
$ grep -c "Failed" /var/log/secure

# 统计分析
$ grep "Failed password" /var/log/secure | awk '{print $11}' | sort | uniq -c | sort -nr
15 192.168.1.100
10 192.168.1.101
5 192.168.1.102

# 查看登录成功的IP
$ grep "Accepted" /var/log/secure | awk '{print $11}' | sort | uniq -c | sort -nr
25 10.0.1.100
15 10.0.1.101

# 查看系统重启记录
$ last reboot
reboot system boot 5.4.0-100-generic Fri Apr 3 08:00 still running
reboot system boot 5.4.0-100-generic Mon Mar 1 08:00 – Fri Apr 3 07:59 (33+00:59)

# 查看用户登录历史
$ last -n 10
root pts/0 10.0.1.100 Fri Apr 3 09:00 still logged in
admin pts/1 10.0.1.101 Fri Apr 3 08:30 – 09:00 (00:30)

# 查看失败的登录尝试
$ lastb | head -10
root ssh:notty 192.168.1.100 Fri Apr 3 09:55 – 09:55 (00:00)
admin ssh:notty 192.168.1.101 Fri Apr 3 09:54 – 09:54 (00:00)

# 分析Nginx访问日志
$ awk '{print $1}' /var/log/nginx/access.log | sort | uniq -c | sort -rn | head -10
1250 10.0.1.100
850 10.0.1.101
650 10.0.1.102

# 分析HTTP状态码
$ awk '{print $9}' /var/log/nginx/access.log | sort | uniq -c | sort -rn
50000 200
5000 304
1000 404
500 500

# 查找特定时间段的日志
$ awk '/2026-04-03 09:00/,/2026-04-03 10:00/' /var/log/messages

# 使用sed提取日志
$ sed -n '/error/p' /var/log/messages

# 日志分析脚本
#!/bin/bash
# File: log_analysis.sh
# Summarize common health indicators from the system logs: error counts,
# failed SSH logins by source IP, disk-space warnings, and service restarts.
# Log paths follow RHEL/CentOS conventions; override via environment if needed
# (e.g. on Debian/Ubuntu set MESSAGES_LOG=/var/log/syslog SECURE_LOG=/var/log/auth.log).

MESSAGES_LOG=${MESSAGES_LOG:-/var/log/messages}
SECURE_LOG=${SECURE_LOG:-/var/log/secure}

echo "=== 系统错误统计 ==="
grep -c "error" "$MESSAGES_LOG"

echo "=== 登录失败统计 ==="
# Failed-password attempts grouped by source IP (field 11 of the secure log), top 5.
grep "Failed password" "$SECURE_LOG" | awk '{print $11}' | sort | uniq -c | sort -nr | head -5

echo "=== 磁盘空间警告 ==="
grep "No space left" "$MESSAGES_LOG" | tail -5

echo "=== 服务重启记录 ==="
grep "Restarting" "$MESSAGES_LOG" | tail -10

$ chmod +x log_analysis.sh
$ ./log_analysis.sh
=== 系统错误统计 ===
125

=== 登录失败统计 ===
15 192.168.1.100
10 192.168.1.101

=== 磁盘空间警告 ===
Apr 3 09:55:00 fgedu-server01 kernel: No space left on device

=== 服务重启记录 ===
Apr 3 10:00:00 fgedu-server01 systemd[1]: Restarting nginx.service.

六、集中式日志管理

6.1 ELK Stack部署

ELK Stack(Elasticsearch、Logstash、Kibana)是企业级日志管理解决方案。

# ELK Stack架构
架构组件:
– Filebeat:日志采集代理
– Logstash:日志处理管道
– Elasticsearch:日志存储和搜索
– Kibana:可视化界面

# Filebeat配置
# /etc/filebeat/filebeat.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
    - /var/log/secure
    - /var/log/cron
  fields:
    type: system
  fields_under_root: true

- type: log
  enabled: true
  paths:
    - /var/log/nginx/*.log
  fields:
    type: nginx
  fields_under_root: true
  multiline:
    pattern: '^\d{4}-\d{2}-\d{2}'
    negate: true
    match: after

output.logstash:
  hosts: ["logstash.fgedu.net.cn:5044"]
  bulk_max_size: 2048

# Logstash配置
# /etc/logstash/conf.d/fgedu.conf

input {
beats {
port => 5044
}
}

filter {
if [type] == "system" {
grok {
match => { "message" => "%{SYSLOGBASE} %{GREEDYDATA:syslog_message}" }
}
date {
match => [ "timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}

if [type] == "nginx" {
grok {
match => { "message" => "%{COMBINEDAPACHELOG}" }
}
date {
match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
}
}
}

output {
elasticsearch {
hosts => ["http://elasticsearch.fgedu.net.cn:9200"]
index => "fgedu-logs-%{+YYYY.MM.dd}"
}
}

# Elasticsearch配置
# /etc/elasticsearch/elasticsearch.yml

cluster.name: fgedu-logs
node.name: fgedu-es-node1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.0.1.21", "10.0.1.22", "10.0.1.23"]
cluster.initial_master_nodes: ["fgedu-es-node1", "fgedu-es-node2", "fgedu-es-node3"]

# Kibana配置
# /etc/kibana/kibana.yml

server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://elasticsearch.fgedu.net.cn:9200"]
kibana.index: ".kibana"

# 启动服务
$ systemctl start filebeat logstash elasticsearch kibana
$ systemctl enable filebeat logstash elasticsearch kibana

# 检查服务状态
$ systemctl status elasticsearch
● elasticsearch.service – Elasticsearch
Loaded: loaded (/usr/lib/systemd/system/elasticsearch.service; enabled)
Active: active (running) since Fri 2026-04-03 10:00:00 CST; 10s ago

# 访问Kibana
http://kibana.fgedu.net.cn:5601

6.2 日志告警配置

配置日志告警规则,及时发现异常情况。

# 使用ElastAlert配置日志告警
# /etc/elastalert/config.yaml

rules_folder: /etc/elastalert/rules
run_every:
  minutes: 1
buffer_time:
  minutes: 15
es_host: elasticsearch.fgedu.net.cn
es_port: 9200

# 告警规则示例
# /etc/elastalert/rules/ssh_failed.yml

name: SSH Failed Login Alert
type: frequency
index: fgedu-logs-*
num_events: 10
timeframe:
  minutes: 5
filter:
- term:
    type: "system"
- query:
    query_string:
      query: 'message: "Failed password"'
alert:
- "email"
email:
- "ops@fgedu.net.cn"

# 磁盘空间告警
# /etc/elastalert/rules/disk_space.yml

name: Disk Space Alert
type: frequency
index: fgedu-logs-*
num_events: 1
timeframe:
  minutes: 1
filter:
- query:
    query_string:
      query: 'message: "No space left on device"'
alert:
- "email"
- "slack"

# 测试告警规则
$ elastalert-test-rule /etc/elastalert/rules/ssh_failed.yml

# 启动ElastAlert
$ systemctl start elastalert
$ systemctl enable elastalert

# 日志告警脚本
#!/bin/bash
# File: log_alert.sh
# Threshold-based alerting on SSH brute force, root-disk usage and memory usage.
# Intended to be run periodically from cron; sends mail when a threshold is breached.

SECURE_LOG=${SECURE_LOG:-/var/log/secure}
ALERT_MAIL=${ALERT_MAIL:-ops@fgedu.net.cn}

# Check for SSH brute-force attempts today.
# NOTE: traditional syslog timestamps space-pad the day ("Apr  3"), so use %e
# (space-padded) rather than %d (zero-padded "03"), which would never match.
TODAY=$(date '+%b %e')
FAILED_COUNT=$(grep "Failed password" "$SECURE_LOG" 2>/dev/null | grep -c "$TODAY")
if [ "$FAILED_COUNT" -gt 50 ]; then
  echo "Warning: $FAILED_COUNT failed login attempts today" | mail -s "SSH Alert" "$ALERT_MAIL"
fi

# Check root filesystem usage (percentage, '%' stripped). -P avoids line wrapping
# on long device names so the usage column is reliably field 5.
DISK_USAGE=$(df -P / | awk 'NR==2 {print $5}' | tr -d '%')
if [ "${DISK_USAGE:-0}" -gt 90 ]; then
  echo "Warning: Root partition is ${DISK_USAGE}% full" | mail -s "Disk Alert" "$ALERT_MAIL"
fi

# Check memory usage as a whole-number percentage of total.
MEM_USAGE=$(free | awk '/Mem/ {printf "%.0f", $3/$2*100}')
if [ "${MEM_USAGE:-0}" -gt 90 ]; then
  echo "Warning: Memory usage is ${MEM_USAGE}%" | mail -s "Memory Alert" "$ALERT_MAIL"
fi

# 设置定时任务
$ crontab -l
*/5 * * * * /opt/scripts/log_alert.sh

总结

Linux系统日志管理是运维工作的基础,通过合理的日志配置和管理,可以快速定位问题、满足审计要求、优化系统性能。本教程详细介绍了rsyslog和journald的配置方法、日志轮转、日志分析和集中式日志管理等技术。

更多学习教程www.fgedu.net.cn,在实际工作中,建议建立完善的日志管理体系,包括日志收集、存储、分析和告警等环节,确保日志的有效性和可用性。

风哥风哥提示:日志管理要注意日志的保留周期和存储空间规划,同时要确保日志的安全性,防止日志被篡改。

本文由风哥教程整理发布,仅用于学习测试使用,转载注明出处:http://www.fgedu.net.cn/10327.html

联系我们

在线咨询:点击这里给我发消息

微信号:itpux-com

工作日:9:30-18:30,节假日休息