1. 首页 > Linux教程 > 正文

Linux教程FG331-大规模环境架构设计

内容简介:本文风哥教程参考Linux官方文档、Red Hat Enterprise Linux官方文档、Ansible Automation Platform官方文档、Docker官方文档、Kubernetes官方文档和Podman官方文档等内容,详细介绍了相关技术的配置和使用方法。

风哥提示:

本文档介绍大规模Linux环境的架构设计原则和方法。

Part01-架构设计原则

1.1 系统架构规划

# 查看系统架构信息
[root@arch-server ~]# uname -a
Linux arch-server 5.14.0-284.11.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Apr 4 13:30:00 UTC 2026 x86_64 GNU/Linux

# 查看CPU信息
[root@arch-server ~]# lscpu
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 64
On-line CPU(s) list: 0-63
Vendor ID: GenuineIntel
Model name: Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz
CPU family: 6
Model: 85
Thread(s) per core: 2
Core(s) per socket: 24
Socket(s): 2
Stepping: 7
CPU max MHz: 4000.0000
CPU min MHz: 1000.0000

# 查看内存信息
[root@arch-server ~]# free -h
total used free shared buff/cache available
Mem: 251Gi 50Gi 150Gi 5.0Gi 51Gi 196Gi
Swap: 8.0Gi 0B 8.0Gi

# 查看存储信息
[root@arch-server ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 931.5G 0 disk
├─sda1 8:1 0 1G 0 part /boot
├─sda2 8:2 0 100G 0 part /
├─sda3 8:3 0 50G 0 part /var
└─sda4 8:4 0 780.5G 0 part /data
sdb 8:16 0 3.7T 0 disk
└─sdb1 8:17 0 3.7T 0 part /backup

1.2 网络架构设计

# 查看网络接口
[root@arch-server ~]# ip addr show
1: lo: mtu 65536
inet 127.0.0.1/8 scope host lo
2: eth0: mtu 1500
inet 10.0.1.10/24 brd 10.0.1.255 scope global eth0
3: eth1: mtu 1500
inet 10.0.2.10/24 brd 10.0.2.255 scope global eth1
4: eth2: mtu 9000
inet 10.0.3.10/24 brd 10.0.3.255 scope global eth2

# 查看路由表
[root@arch-server ~]# ip route show
default via 10.0.1.1 dev eth0
10.0.1.0/24 dev eth0 proto kernel scope link src 10.0.1.10
10.0.2.0/24 dev eth1 proto kernel scope link src 10.0.2.10
10.0.3.0/24 dev eth2 proto kernel scope link src 10.0.3.10

# 配置网络绑定
[root@arch-server ~]# nmcli connection add type bond con-name bond0 ifname bond0 mode active-backup
Connection 'bond0' (12345678-90ab-cdef-1234-567890abcdef) successfully added.

[root@arch-server ~]# nmcli connection add type ethernet slave-type bond con-name bond0-port1 ifname eth0 master bond0
Connection 'bond0-port1' successfully added.

[root@arch-server ~]# nmcli connection add type ethernet slave-type bond con-name bond0-port2 ifname eth1 master bond0
Connection 'bond0-port2' successfully added.

[root@arch-server ~]# nmcli connection up bond0
Connection successfully activated (master waiting for slaves) (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/1)

# 验证绑定配置
[root@arch-server ~]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v5.14.0-284.11.1.el9_2.x86_64

Bonding Mode: fault-tolerance (active-backup)
Primary Slave: None
Currently Active Slave: eth0
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0

Slave Interface: eth0
MII Status: up
Speed: 10000 Mbps
Duplex: full

Slave Interface: eth1
MII Status: up
Speed: 10000 Mbps
Duplex: full

Part02-分层架构设计

2.1 应用层设计

# 查看应用服务状态
[root@app-server ~]# systemctl list-units --type=service --state=running | grep -E "nginx|httpd|tomcat|node"
nginx.service loaded active running The nginx HTTP and reverse proxy server
nodeapp.service loaded active running Node.js Application

# 查看应用端口
[root@app-server ~]# netstat -tlnp | grep -E "80|443|3000|8080"
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 12345/nginx: master
tcp 0 0 0.0.0.0:443 0.0.0.0:* LISTEN 12345/nginx: master
tcp 0 0 0.0.0.0:3000 0.0.0.0:* LISTEN 12346/node
tcp 0 0 0.0.0.0:8080 0.0.0.0:* LISTEN 12347/java

# 配置负载均衡
[root@lb-server ~]# cat /etc/nginx/conf.d/upstream.conf
upstream backend {
least_conn;
server 10.0.1.101:8080 weight=5;
server 10.0.1.102:8080 weight=5;
server 10.0.1.103:8080 weight=5;
keepalive 32;
}

server {
listen 80;
server_name app.fgedu.net.cn;

location / {
proxy_pass http://backend;
proxy_http_version 1.1;
proxy_set_header Connection “”;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

# 测试负载均衡配置
[root@lb-server ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful

[root@lb-server ~]# systemctl reload nginx

2.2 数据库层设计

# 查看数据库集群状态
[root@db-master ~]# mysql -e "SHOW MASTER STATUS\G"
*************************** 1. row ***************************
File: mysql-bin.000001
Position: 1234
Binlog_Do_DB: appdb
Binlog_Ignore_DB: mysql,information_schema,performance_schema
Executed_Gtid_Set: 12345678-90ab-cdef-1234-567890abcdef:1-100

# 查看从库状态
[root@db-slave ~]# mysql -e "SHOW SLAVE STATUS\G"
*************************** 1. row ***************************
Slave_IO_State: Waiting for master to send event
Master_Host: 10.0.2.10
Master_User: repl
Master_Port: 3306
Connect_Retry: 60
Master_Log_File: mysql-bin.000001
Read_Master_Log_Pos: 1234
Relay_Log_File: relay-bin.000002
Relay_Log_Pos: 567
Relay_Master_Log_File: mysql-bin.000001
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
Replicate_Do_DB: appdb
Replicate_Ignore_DB: mysql,information_schema,performance_schema
Replicate_Do_Table:
Replicate_Ignore_Table:
Replicate_Wild_Do_Table:
Replicate_Wild_Ignore_Table:
Last_Errno: 0
Last_Error:
Skip_Counter: 0
Exec_Master_Log_Pos: 1234
Relay_Log_Space: 789
Until_Condition: None
Until_Log_File:
Until_Log_Pos: 0
Master_SSL_Allowed: No
Seconds_Behind_Master: 0

# 配置数据库读写分离
[root@proxy-server ~]# cat /etc/proxysql.cnf
datadir="/var/lib/proxysql"

admin_variables=
{
admin_credentials="admin:admin"
mysql_ifaces="0.0.0.0:6032"
}

mysql_variables=
{
threads=4
max_connections=2048
default_query_delay=0
default_query_timeout=10000
have_compress=true
poll_timeout=2000
interfaces="0.0.0.0:6033"
default_schema="information_schema"
stacksize=1048576
server_version="8.0.32"
connect_timeout_server=3000
monitor_username="monitor"
monitor_password="monitor"
monitor_history=600000
monitor_connect_interval=60000
monitor_ping_interval=10000
ping_interval_server_msec=120000
ping_timeout_server=500
commands_stats=true
sessions_sort=true
}

mysql_servers=
(
{
address="10.0.2.10"
port=3306
hostgroup=10
max_connections=1000
},
{
address="10.0.2.11"
port=3306
hostgroup=20
max_connections=1000
}
)

mysql_users=
(
{
username = "appuser"
password = "apppass"
default_hostgroup = 10
}
)

Part03-高可用架构设计

3.1 集群架构设计

# 查看集群节点状态
[root@cluster-node1 ~]# pcs status nodes
Pacemaker Nodes:
Online: node1 node2 node3 node4 node5
Standby:
Maintenance:
Offline:

# 查看资源分布
[root@cluster-node1 ~]# pcs status resources
* vip-web (ocf::heartbeat:IPaddr2): Started node1
* vip-db (ocf::heartbeat:IPaddr2): Started node3
* web-clone (systemd:nginx): Started [ node1 node2 ]
* db-master (ocf::heartbeat:mysql): Master node3

# 配置资源约束
[root@cluster-node1 ~]# pcs constraint location vip-web rule score=INFINITY #uname eq node1 or #uname eq node2
[root@cluster-node1 ~]# pcs constraint location vip-db rule score=INFINITY #uname eq node3 or #uname eq node4

# 查看约束配置
[root@cluster-node1 ~]# pcs constraint
Location Constraints:
Resource: vip-web
Rule: score=INFINITY
Expression: #uname eq node1 or #uname eq node2
Resource: vip-db
Rule: score=INFINITY
Expression: #uname eq node3 or #uname eq node4
Ordering Constraints:
start vip-web then start web-clone (Mandatory)
Colocation Constraints:
web-clone with vip-web (score:INFINITY)

风哥针对大规模架构设计建议:

  • 采用分层架构设计
  • 实现服务高可用
  • 配置负载均衡
  • 设计数据库读写分离
  • 规划网络架构

本文由风哥教程整理发布,仅用于学习测试使用,转载注明出处:http://www.fgedu.net.cn/10327.html

联系我们

在线咨询:点击这里给我发消息

微信号:itpux-com

工作日:9:30-18:30,节假日休息