1. Requirements
As shown in the network plan, a Dell T440 server running CentOS has two NICs that need to be connected to interfaces GE1/0/1 and GE1/0/2 of an H3C switch. The two server NICs facing the switch work in active-backup mode: when one fails, the backup takes over as the active interface.
2. Configuration analysis
Because the two NICs work in active-backup mode, only one link carries traffic at any given time, so bonding mode 1 (mode=1) is used. Mode 1 only needs to be configured on the server, not on the switch, so once the cables are connected the server NICs can be configured as follows.
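Before editing any files, it can be worth confirming that the bonding driver is available on the system. This optional check is not part of the original procedure; the exact modinfo output depends on the kernel version.
# Optional: confirm the bonding kernel module can be loaded
[root@localhost ~]# modprobe bonding
[root@localhost ~]# modinfo bonding | grep -E "^(description|version)"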
3. Configuration steps
3.1 Check the server NIC information
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: em1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether f4:ee:08:31:90:b3 brd ff:ff:ff:ff:ff:ff
inet 10.129.79.252/24 brd 10.129.79.255 scope global noprefixroute em1
valid_lft forever preferred_lft forever
inet6 fe80::f6ee:8ff:fe31:90b3/64 scope link noprefixroute
valid_lft forever preferred_lft forever
3: em2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether f4:ee:08:31:90:b4 brd ff:ff:ff:ff:ff:ff
inet 10.129.79.101/24 brd 10.129.79.255 scope global noprefixroute dynamic em2
valid_lft 85149sec preferred_lft 85149sec
inet6 fe80::71d7:b5e6:f832:c01e/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@localhost ~]#
The server currently has two physical NICs, em1 and em2.
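In addition to ip a, the link state of both NICs can optionally be confirmed with nmcli and ethtool before they are put into the bond. This is not part of the original steps; the interface names em1 and em2 come from the output above.
[root@localhost ~]# nmcli device status
[root@localhost ~]# ethtool em1 | grep "Link detected"
[root@localhost ~]# ethtool em2 | grep "Link detected"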
3.2 Back up the NIC configuration files
[root@localhost ~]# cp /etc/sysconfig/network-scripts/ifcfg-em1 /etc/sysconfig/network-scripts/ifcfg-em1.mybak
[root@localhost ~]# cp /etc/sysconfig/network-scripts/ifcfg-em2 /etc/sysconfig/network-scripts/ifcfg-em2.mybak
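A quick listing (optional, not in the original procedure) confirms that the backup copies are in place before the files are modified:
[root@localhost ~]# ls -l /etc/sysconfig/network-scripts/ifcfg-em*.mybak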
3.3 Create and configure the virtual (bond) interface
# Create the virtual interface bond0 by creating the file ifcfg-bond0.
[root@localhost ~]# vim /etc/sysconfig/network-scripts/ifcfg-bond0
# Write the following configuration into ifcfg-bond0, then save and exit.
BONDING_OPTS="mode=1 miimon=100 updelay=100 downdelay=100"
TYPE=Bond
BONDING_MASTER=yes
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
IPADDR=10.129.79.250
PREFIX=24
GATEWAY=10.129.79.254
DEFROUTE=no
IPV4_FAILURE_FATAL=no
NAME=bond0
DEVICE=bond0
ONBOOT=yes
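On systems managed by NetworkManager, the same bond can alternatively be created with nmcli instead of editing the file by hand. The command below is only a sketch of that alternative, reusing the addressing from the configuration above; it is not part of the original procedure.
[root@localhost ~]# nmcli connection add type bond ifname bond0 con-name bond0 \
      bond.options "mode=active-backup,miimon=100,updelay=100,downdelay=100" \
      ipv4.method manual ipv4.addresses 10.129.79.250/24 ipv4.gateway 10.129.79.254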
3.4 Edit the physical NIC file ifcfg-em1
# Write the following configuration into ifcfg-em1, then save and exit.
[root@localhost ~]# vim /etc/sysconfig/network-scripts/ifcfg-em1
DEVICE=em1
TYPE=Ethernet
ONBOOT=yes
SLAVE=yes
MASTER=bond0
BOOTPROTO=none
3.5 Edit the physical NIC file ifcfg-em2
# Write the following configuration into ifcfg-em2, then save and exit.
[root@localhost ~]# vim /etc/sysconfig/network-scripts/ifcfg-em2
DEVICE=em2
TYPE=Ethernet
ONBOOT=yes
SLAVE=yes
MASTER=bond0
BOOTPROTO=none
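For the nmcli alternative sketched in step 3.3, the two physical NICs would be enslaved with commands such as the following (again only a sketch, not part of the original steps; the connection names are arbitrary):
[root@localhost ~]# nmcli connection add type bond-slave ifname em1 con-name bond0-slave-em1 master bond0
[root@localhost ~]# nmcli connection add type bond-slave ifname em2 con-name bond0-slave-em2 master bond0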
3.6 Restart the network service
[root@localhost ~]# ifdown bond0
[root@localhost ~]# ifup bond0
Or run:
[root@localhost ~]# systemctl restart network
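On newer releases where the legacy network service is no longer available (for example CentOS 8), the ifcfg files can be reloaded and activated through NetworkManager instead; this is an assumed alternative, not part of the original steps. A quick ip check afterwards confirms that bond0 is up and holds the configured address.
[root@localhost ~]# nmcli connection reload
[root@localhost ~]# nmcli connection down bond0 && nmcli connection up bond0
[root@localhost ~]# ip addr show bond0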
4. Verify the configuration
# Check the bond0 status on the Linux server: it is working in active-backup mode with two slave interfaces.
[root@localhost ~]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: None
Currently Active Slave: em2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 100
Down Delay (ms): 100
Slave Interface: em1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 1
Permanent HW addr: f4:ee:08:30:99:b3
Slave queue ID: 0
Slave Interface: em2
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 1
Permanent HW addr: f4:ee:08:30:99:b4
Slave queue ID: 0
[root@localhost ~]#
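Failover can also be exercised from the server side before touching the switch. The sketch below (not in the original procedure) takes em1 down, confirms that the active slave switches to em2, and then restores em1 as the backup slave:
# Take em1 down and check which slave is now active
[root@localhost ~]# ip link set em1 down
[root@localhost ~]# grep "Currently Active Slave" /proc/net/bonding/bond0
# Bring em1 back up; in mode 1 it stays as the backup slave
[root@localhost ~]# ip link set em1 up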
# Check the inbound traffic rates on the H3C switch.
<SERVER-SW>display counters rate inbound interface
Usage: Bandwidth utilization in percentage
Interface Usage (%) Total (pps) Broadcast (pps) Multicast (pps)
GE1/0/1 10 2336 -- --
GE1/0/2 0 6 -- --
……
# After shutting down port GigabitEthernet1/0/1 on the H3C switch, check the inbound rates again: the traffic has switched over to GigabitEthernet1/0/2.
[SERVER-SW]interface GigabitEthernet 1/0/1
[SERVER-SW-GigabitEthernet1/0/1]shutdown
[SERVER-SW-GigabitEthernet1/0/1]quit
[SERVER-SW]display counters rate inbound interface
Usage: Bandwidth utilization in percentage
Interface Usage (%) Total (pps) Broadcast (pps) Multicast (pps)
GE1/0/1 0 0 -- --
GE1/0/2 90 17865 -- --
……
Similarly, after port GigabitEthernet1/0/1 is brought back up on the H3C switch and the inbound rates are checked again, the traffic is still on GigabitEthernet1/0/2. Because the server works in active-backup mode, em1 was automatically demoted to the backup slave when GigabitEthernet1/0/1 went down, and it does not take over again when the link recovers.
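If traffic should fail back to em1 once GigabitEthernet1/0/1 recovers, the bonding driver's primary and primary_reselect options can be added to the bond0 configuration. This is an optional tweak, not part of the original setup; after changing the line in ifcfg-bond0, restart the network as in step 3.6.
# Optional: prefer em1 and reselect it whenever its link comes back
BONDING_OPTS="mode=1 miimon=100 updelay=100 downdelay=100 primary=em1 primary_reselect=always"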