Diffstat (limited to 'drivers/net/bonding/bond_main.c')
-rw-r--r-- | drivers/net/bonding/bond_main.c | 1340
1 file changed, 549 insertions, 791 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e953c6ad6e6d..63c22b0bb5ad 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,15 +59,12 @@ | |||
59 | #include <linux/uaccess.h> | 59 | #include <linux/uaccess.h> |
60 | #include <linux/errno.h> | 60 | #include <linux/errno.h> |
61 | #include <linux/netdevice.h> | 61 | #include <linux/netdevice.h> |
62 | #include <linux/netpoll.h> | ||
63 | #include <linux/inetdevice.h> | 62 | #include <linux/inetdevice.h> |
64 | #include <linux/igmp.h> | 63 | #include <linux/igmp.h> |
65 | #include <linux/etherdevice.h> | 64 | #include <linux/etherdevice.h> |
66 | #include <linux/skbuff.h> | 65 | #include <linux/skbuff.h> |
67 | #include <net/sock.h> | 66 | #include <net/sock.h> |
68 | #include <linux/rtnetlink.h> | 67 | #include <linux/rtnetlink.h> |
69 | #include <linux/proc_fs.h> | ||
70 | #include <linux/seq_file.h> | ||
71 | #include <linux/smp.h> | 68 | #include <linux/smp.h> |
72 | #include <linux/if_ether.h> | 69 | #include <linux/if_ether.h> |
73 | #include <net/arp.h> | 70 | #include <net/arp.h> |
@@ -76,6 +73,7 @@ | |||
76 | #include <linux/if_vlan.h> | 73 | #include <linux/if_vlan.h> |
77 | #include <linux/if_bonding.h> | 74 | #include <linux/if_bonding.h> |
78 | #include <linux/jiffies.h> | 75 | #include <linux/jiffies.h> |
76 | #include <linux/preempt.h> | ||
79 | #include <net/route.h> | 77 | #include <net/route.h> |
80 | #include <net/net_namespace.h> | 78 | #include <net/net_namespace.h> |
81 | #include <net/netns/generic.h> | 79 | #include <net/netns/generic.h> |
@@ -91,8 +89,7 @@ | |||
91 | 89 | ||
92 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; | 90 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; |
93 | static int tx_queues = BOND_DEFAULT_TX_QUEUES; | 91 | static int tx_queues = BOND_DEFAULT_TX_QUEUES; |
94 | static int num_grat_arp = 1; | 92 | static int num_peer_notif = 1; |
95 | static int num_unsol_na = 1; | ||
96 | static int miimon = BOND_LINK_MON_INTERV; | 93 | static int miimon = BOND_LINK_MON_INTERV; |
97 | static int updelay; | 94 | static int updelay; |
98 | static int downdelay; | 95 | static int downdelay; |
@@ -109,15 +106,18 @@ static char *arp_validate; | |||
109 | static char *fail_over_mac; | 106 | static char *fail_over_mac; |
110 | static int all_slaves_active = 0; | 107 | static int all_slaves_active = 0; |
111 | static struct bond_params bonding_defaults; | 108 | static struct bond_params bonding_defaults; |
109 | static int resend_igmp = BOND_DEFAULT_RESEND_IGMP; | ||
112 | 110 | ||
113 | module_param(max_bonds, int, 0); | 111 | module_param(max_bonds, int, 0); |
114 | MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); | 112 | MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); |
115 | module_param(tx_queues, int, 0); | 113 | module_param(tx_queues, int, 0); |
116 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); | 114 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); |
117 | module_param(num_grat_arp, int, 0644); | 115 | module_param_named(num_grat_arp, num_peer_notif, int, 0644); |
118 | MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); | 116 | MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on " |
119 | module_param(num_unsol_na, int, 0644); | 117 | "failover event (alias of num_unsol_na)"); |
120 | MODULE_PARM_DESC(num_unsol_na, "Number of unsolicited IPv6 Neighbor Advertisements packets to send on failover event"); | 118 | module_param_named(num_unsol_na, num_peer_notif, int, 0644); |
119 | MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on " | ||
120 | "failover event (alias of num_grat_arp)"); | ||
121 | module_param(miimon, int, 0); | 121 | module_param(miimon, int, 0); |
122 | MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); | 122 | MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); |
123 | module_param(updelay, int, 0); | 123 | module_param(updelay, int, 0); |
@@ -129,7 +129,7 @@ module_param(use_carrier, int, 0); | |||
129 | MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " | 129 | MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " |
130 | "0 for off, 1 for on (default)"); | 130 | "0 for off, 1 for on (default)"); |
131 | module_param(mode, charp, 0); | 131 | module_param(mode, charp, 0); |
132 | MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, " | 132 | MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " |
133 | "1 for active-backup, 2 for balance-xor, " | 133 | "1 for active-backup, 2 for balance-xor, " |
134 | "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " | 134 | "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " |
135 | "6 for balance-alb"); | 135 | "6 for balance-alb"); |
@@ -144,30 +144,41 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave " | |||
144 | "2 for only on active slave " | 144 | "2 for only on active slave " |
145 | "failure"); | 145 | "failure"); |
146 | module_param(lacp_rate, charp, 0); | 146 | module_param(lacp_rate, charp, 0); |
147 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " | 147 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " |
148 | "(slow/fast)"); | 148 | "0 for slow, 1 for fast"); |
149 | module_param(ad_select, charp, 0); | 149 | module_param(ad_select, charp, 0); |
150 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)"); | 150 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " |
151 | "0 for stable (default), 1 for bandwidth, " | ||
152 | "2 for count"); | ||
151 | module_param(xmit_hash_policy, charp, 0); | 153 | module_param(xmit_hash_policy, charp, 0); |
152 | MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)" | 154 | MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " |
153 | ", 1 for layer 3+4"); | 155 | "0 for layer 2 (default), 1 for layer 3+4, " |
156 | "2 for layer 2+3"); | ||
154 | module_param(arp_interval, int, 0); | 157 | module_param(arp_interval, int, 0); |
155 | MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); | 158 | MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); |
156 | module_param_array(arp_ip_target, charp, NULL, 0); | 159 | module_param_array(arp_ip_target, charp, NULL, 0); |
157 | MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); | 160 | MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); |
158 | module_param(arp_validate, charp, 0); | 161 | module_param(arp_validate, charp, 0); |
159 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); | 162 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; " |
163 | "0 for none (default), 1 for active, " | ||
164 | "2 for backup, 3 for all"); | ||
160 | module_param(fail_over_mac, charp, 0); | 165 | module_param(fail_over_mac, charp, 0); |
161 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); | 166 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " |
167 | "the same MAC; 0 for none (default), " | ||
168 | "1 for active, 2 for follow"); | ||
162 | module_param(all_slaves_active, int, 0); | 169 | module_param(all_slaves_active, int, 0); |
163 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" | 170 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" |
164 | "by setting active flag for all slaves. " | 171 | "by setting active flag for all slaves; " |
165 | "0 for never (default), 1 for always."); | 172 | "0 for never (default), 1 for always."); |
173 | module_param(resend_igmp, int, 0); | ||
174 | MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on " | ||
175 | "link failure"); | ||
166 | 176 | ||
167 | /*----------------------------- Global variables ----------------------------*/ | 177 | /*----------------------------- Global variables ----------------------------*/ |
168 | 178 | ||
169 | static const char * const version = | 179 | #ifdef CONFIG_NET_POLL_CONTROLLER |
170 | DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; | 180 | atomic_t netpoll_block_tx = ATOMIC_INIT(0); |
181 | #endif | ||
171 | 182 | ||
172 | int bond_net_id __read_mostly; | 183 | int bond_net_id __read_mostly; |
173 | 184 | ||
@@ -176,9 +187,6 @@ static int arp_ip_count; | |||
176 | static int bond_mode = BOND_MODE_ROUNDROBIN; | 187 | static int bond_mode = BOND_MODE_ROUNDROBIN; |
177 | static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; | 188 | static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; |
178 | static int lacp_fast; | 189 | static int lacp_fast; |
179 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
180 | static int disable_netpoll = 1; | ||
181 | #endif | ||
182 | 190 | ||
183 | const struct bond_parm_tbl bond_lacp_tbl[] = { | 191 | const struct bond_parm_tbl bond_lacp_tbl[] = { |
184 | { "slow", AD_LACP_SLOW}, | 192 | { "slow", AD_LACP_SLOW}, |
@@ -235,13 +243,12 @@ struct bond_parm_tbl ad_select_tbl[] = { | |||
235 | 243 | ||
236 | /*-------------------------- Forward declarations ---------------------------*/ | 244 | /*-------------------------- Forward declarations ---------------------------*/ |
237 | 245 | ||
238 | static void bond_send_gratuitous_arp(struct bonding *bond); | ||
239 | static int bond_init(struct net_device *bond_dev); | 246 | static int bond_init(struct net_device *bond_dev); |
240 | static void bond_uninit(struct net_device *bond_dev); | 247 | static void bond_uninit(struct net_device *bond_dev); |
241 | 248 | ||
242 | /*---------------------------- General routines -----------------------------*/ | 249 | /*---------------------------- General routines -----------------------------*/ |
243 | 250 | ||
244 | static const char *bond_mode_name(int mode) | 251 | const char *bond_mode_name(int mode) |
245 | { | 252 | { |
246 | static const char *names[] = { | 253 | static const char *names[] = { |
247 | [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", | 254 | [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", |
@@ -307,6 +314,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) | |||
307 | 314 | ||
308 | pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); | 315 | pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); |
309 | 316 | ||
317 | block_netpoll_tx(); | ||
310 | write_lock_bh(&bond->lock); | 318 | write_lock_bh(&bond->lock); |
311 | 319 | ||
312 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | 320 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { |
@@ -341,36 +349,11 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) | |||
341 | 349 | ||
342 | out: | 350 | out: |
343 | write_unlock_bh(&bond->lock); | 351 | write_unlock_bh(&bond->lock); |
352 | unblock_netpoll_tx(); | ||
344 | return res; | 353 | return res; |
345 | } | 354 | } |
346 | 355 | ||
347 | /** | 356 | /** |
348 | * bond_has_challenged_slaves | ||
349 | * @bond: the bond we're working on | ||
350 | * | ||
351 | * Searches the slave list. Returns 1 if a vlan challenged slave | ||
352 | * was found, 0 otherwise. | ||
353 | * | ||
354 | * Assumes bond->lock is held. | ||
355 | */ | ||
356 | static int bond_has_challenged_slaves(struct bonding *bond) | ||
357 | { | ||
358 | struct slave *slave; | ||
359 | int i; | ||
360 | |||
361 | bond_for_each_slave(bond, slave, i) { | ||
362 | if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) { | ||
363 | pr_debug("found VLAN challenged slave - %s\n", | ||
364 | slave->dev->name); | ||
365 | return 1; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | pr_debug("no VLAN challenged slaves found\n"); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * bond_next_vlan - safely skip to the next item in the vlans list. | 357 | * bond_next_vlan - safely skip to the next item in the vlans list. |
375 | * @bond: the bond we're working on | 358 | * @bond: the bond we're working on |
376 | * @curr: item we're advancing from | 359 | * @curr: item we're advancing from |
@@ -405,54 +388,26 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) | |||
405 | return next; | 388 | return next; |
406 | } | 389 | } |
407 | 390 | ||
391 | #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) | ||
392 | |||
408 | /** | 393 | /** |
409 | * bond_dev_queue_xmit - Prepare skb for xmit. | 394 | * bond_dev_queue_xmit - Prepare skb for xmit. |
410 | * | 395 | * |
411 | * @bond: bond device that got this skb for tx. | 396 | * @bond: bond device that got this skb for tx. |
412 | * @skb: hw accel VLAN tagged skb to transmit | 397 | * @skb: hw accel VLAN tagged skb to transmit |
413 | * @slave_dev: slave that is supposed to xmit this skbuff | 398 | * @slave_dev: slave that is supposed to xmit this skbuff |
414 | * | ||
415 | * When the bond gets an skb to transmit that is | ||
416 | * already hardware accelerated VLAN tagged, and it | ||
417 | * needs to relay this skb to a slave that is not | ||
418 | * hw accel capable, the skb needs to be "unaccelerated", | ||
419 | * i.e. strip the hwaccel tag and re-insert it as part | ||
420 | * of the payload. | ||
421 | */ | 399 | */ |
422 | int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, | 400 | int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, |
423 | struct net_device *slave_dev) | 401 | struct net_device *slave_dev) |
424 | { | 402 | { |
425 | unsigned short uninitialized_var(vlan_id); | 403 | skb->dev = slave_dev; |
404 | skb->priority = 1; | ||
426 | 405 | ||
427 | /* Test vlan_list not vlgrp to catch and handle 802.1p tags */ | 406 | skb->queue_mapping = bond_queue_mapping(skb); |
428 | if (!list_empty(&bond->vlan_list) && | ||
429 | !(slave_dev->features & NETIF_F_HW_VLAN_TX) && | ||
430 | vlan_get_tag(skb, &vlan_id) == 0) { | ||
431 | skb->dev = slave_dev; | ||
432 | skb = vlan_put_tag(skb, vlan_id); | ||
433 | if (!skb) { | ||
434 | /* vlan_put_tag() frees the skb in case of error, | ||
435 | * so return success here so the calling functions | ||
436 | * won't attempt to free is again. | ||
437 | */ | ||
438 | return 0; | ||
439 | } | ||
440 | } else { | ||
441 | skb->dev = slave_dev; | ||
442 | } | ||
443 | 407 | ||
444 | skb->priority = 1; | 408 | if (unlikely(netpoll_tx_running(slave_dev))) |
445 | #ifdef CONFIG_NET_POLL_CONTROLLER | 409 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); |
446 | if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { | 410 | else |
447 | struct netpoll *np = bond->dev->npinfo->netpoll; | ||
448 | slave_dev->npinfo = bond->dev->npinfo; | ||
449 | np->real_dev = np->dev = skb->dev; | ||
450 | slave_dev->priv_flags |= IFF_IN_NETPOLL; | ||
451 | netpoll_send_skb(np, skb); | ||
452 | slave_dev->priv_flags &= ~IFF_IN_NETPOLL; | ||
453 | np->dev = bond->dev; | ||
454 | } else | ||
455 | #endif | ||
456 | dev_queue_xmit(skb); | 411 | dev_queue_xmit(skb); |
457 | 412 | ||
458 | return 0; | 413 | return 0; |
@@ -488,9 +443,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev, | |||
488 | struct slave *slave; | 443 | struct slave *slave; |
489 | int i; | 444 | int i; |
490 | 445 | ||
491 | write_lock(&bond->lock); | 446 | write_lock_bh(&bond->lock); |
492 | bond->vlgrp = grp; | 447 | bond->vlgrp = grp; |
493 | write_unlock(&bond->lock); | 448 | write_unlock_bh(&bond->lock); |
494 | 449 | ||
495 | bond_for_each_slave(bond, slave, i) { | 450 | bond_for_each_slave(bond, slave, i) { |
496 | struct net_device *slave_dev = slave->dev; | 451 | struct net_device *slave_dev = slave->dev; |
@@ -663,7 +618,8 @@ down: | |||
663 | static int bond_update_speed_duplex(struct slave *slave) | 618 | static int bond_update_speed_duplex(struct slave *slave) |
664 | { | 619 | { |
665 | struct net_device *slave_dev = slave->dev; | 620 | struct net_device *slave_dev = slave->dev; |
666 | struct ethtool_cmd etool; | 621 | struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET }; |
622 | u32 slave_speed; | ||
667 | int res; | 623 | int res; |
668 | 624 | ||
669 | /* Fake speed and duplex */ | 625 | /* Fake speed and duplex */ |
@@ -677,7 +633,8 @@ static int bond_update_speed_duplex(struct slave *slave) | |||
677 | if (res < 0) | 633 | if (res < 0) |
678 | return -1; | 634 | return -1; |
679 | 635 | ||
680 | switch (etool.speed) { | 636 | slave_speed = ethtool_cmd_speed(&etool); |
637 | switch (slave_speed) { | ||
681 | case SPEED_10: | 638 | case SPEED_10: |
682 | case SPEED_100: | 639 | case SPEED_100: |
683 | case SPEED_1000: | 640 | case SPEED_1000: |
@@ -695,7 +652,7 @@ static int bond_update_speed_duplex(struct slave *slave) | |||
695 | return -1; | 652 | return -1; |
696 | } | 653 | } |
697 | 654 | ||
698 | slave->speed = etool.speed; | 655 | slave->speed = slave_speed; |
699 | slave->duplex = etool.duplex; | 656 | slave->duplex = etool.duplex; |
700 | 657 | ||
701 | return 0; | 658 | return 0; |
@@ -865,6 +822,17 @@ static void bond_mc_del(struct bonding *bond, void *addr) | |||
865 | } | 822 | } |
866 | 823 | ||
867 | 824 | ||
825 | static void __bond_resend_igmp_join_requests(struct net_device *dev) | ||
826 | { | ||
827 | struct in_device *in_dev; | ||
828 | |||
829 | rcu_read_lock(); | ||
830 | in_dev = __in_dev_get_rcu(dev); | ||
831 | if (in_dev) | ||
832 | ip_mc_rejoin_groups(in_dev); | ||
833 | rcu_read_unlock(); | ||
834 | } | ||
835 | |||
868 | /* | 836 | /* |
869 | * Retrieve the list of registered multicast addresses for the bonding | 837 | * Retrieve the list of registered multicast addresses for the bonding |
870 | * device and retransmit an IGMP JOIN request to the current active | 838 | * device and retransmit an IGMP JOIN request to the current active |
@@ -872,17 +840,35 @@ static void bond_mc_del(struct bonding *bond, void *addr) | |||
872 | */ | 840 | */ |
873 | static void bond_resend_igmp_join_requests(struct bonding *bond) | 841 | static void bond_resend_igmp_join_requests(struct bonding *bond) |
874 | { | 842 | { |
875 | struct in_device *in_dev; | 843 | struct net_device *vlan_dev; |
876 | struct ip_mc_list *im; | 844 | struct vlan_entry *vlan; |
877 | 845 | ||
878 | rcu_read_lock(); | 846 | read_lock(&bond->lock); |
879 | in_dev = __in_dev_get_rcu(bond->dev); | 847 | |
880 | if (in_dev) { | 848 | /* rejoin all groups on bond device */ |
881 | for (im = in_dev->mc_list; im; im = im->next) | 849 | __bond_resend_igmp_join_requests(bond->dev); |
882 | ip_mc_rejoin_group(im); | 850 | |
851 | /* rejoin all groups on vlan devices */ | ||
852 | if (bond->vlgrp) { | ||
853 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | ||
854 | vlan_dev = vlan_group_get_device(bond->vlgrp, | ||
855 | vlan->vlan_id); | ||
856 | if (vlan_dev) | ||
857 | __bond_resend_igmp_join_requests(vlan_dev); | ||
858 | } | ||
883 | } | 859 | } |
884 | 860 | ||
885 | rcu_read_unlock(); | 861 | if (--bond->igmp_retrans > 0) |
862 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | ||
863 | |||
864 | read_unlock(&bond->lock); | ||
865 | } | ||
866 | |||
867 | static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) | ||
868 | { | ||
869 | struct bonding *bond = container_of(work, struct bonding, | ||
870 | mcast_work.work); | ||
871 | bond_resend_igmp_join_requests(bond); | ||
886 | } | 872 | } |
887 | 873 | ||
888 | /* | 874 | /* |
@@ -944,7 +930,6 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, | |||
944 | 930 | ||
945 | netdev_for_each_mc_addr(ha, bond->dev) | 931 | netdev_for_each_mc_addr(ha, bond->dev) |
946 | dev_mc_add(new_active->dev, ha->addr); | 932 | dev_mc_add(new_active->dev, ha->addr); |
947 | bond_resend_igmp_join_requests(bond); | ||
948 | } | 933 | } |
949 | } | 934 | } |
950 | 935 | ||
@@ -1091,6 +1076,21 @@ static struct slave *bond_find_best_slave(struct bonding *bond) | |||
1091 | return bestslave; | 1076 | return bestslave; |
1092 | } | 1077 | } |
1093 | 1078 | ||
1079 | static bool bond_should_notify_peers(struct bonding *bond) | ||
1080 | { | ||
1081 | struct slave *slave = bond->curr_active_slave; | ||
1082 | |||
1083 | pr_debug("bond_should_notify_peers: bond %s slave %s\n", | ||
1084 | bond->dev->name, slave ? slave->dev->name : "NULL"); | ||
1085 | |||
1086 | if (!slave || !bond->send_peer_notif || | ||
1087 | test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) | ||
1088 | return false; | ||
1089 | |||
1090 | bond->send_peer_notif--; | ||
1091 | return true; | ||
1092 | } | ||
1093 | |||
1094 | /** | 1094 | /** |
1095 | * change_active_interface - change the active slave into the specified one | 1095 | * change_active_interface - change the active slave into the specified one |
1096 | * @bond: our bonding struct | 1096 | * @bond: our bonding struct |
@@ -1158,31 +1158,43 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
1158 | bond_set_slave_inactive_flags(old_active); | 1158 | bond_set_slave_inactive_flags(old_active); |
1159 | 1159 | ||
1160 | if (new_active) { | 1160 | if (new_active) { |
1161 | bool should_notify_peers = false; | ||
1162 | |||
1161 | bond_set_slave_active_flags(new_active); | 1163 | bond_set_slave_active_flags(new_active); |
1162 | 1164 | ||
1163 | if (bond->params.fail_over_mac) | 1165 | if (bond->params.fail_over_mac) |
1164 | bond_do_fail_over_mac(bond, new_active, | 1166 | bond_do_fail_over_mac(bond, new_active, |
1165 | old_active); | 1167 | old_active); |
1166 | 1168 | ||
1167 | bond->send_grat_arp = bond->params.num_grat_arp; | 1169 | if (netif_running(bond->dev)) { |
1168 | bond_send_gratuitous_arp(bond); | 1170 | bond->send_peer_notif = |
1169 | 1171 | bond->params.num_peer_notif; | |
1170 | bond->send_unsol_na = bond->params.num_unsol_na; | 1172 | should_notify_peers = |
1171 | bond_send_unsolicited_na(bond); | 1173 | bond_should_notify_peers(bond); |
1174 | } | ||
1172 | 1175 | ||
1173 | write_unlock_bh(&bond->curr_slave_lock); | 1176 | write_unlock_bh(&bond->curr_slave_lock); |
1174 | read_unlock(&bond->lock); | 1177 | read_unlock(&bond->lock); |
1175 | 1178 | ||
1176 | netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); | 1179 | netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); |
1180 | if (should_notify_peers) | ||
1181 | netdev_bonding_change(bond->dev, | ||
1182 | NETDEV_NOTIFY_PEERS); | ||
1177 | 1183 | ||
1178 | read_lock(&bond->lock); | 1184 | read_lock(&bond->lock); |
1179 | write_lock_bh(&bond->curr_slave_lock); | 1185 | write_lock_bh(&bond->curr_slave_lock); |
1180 | } | 1186 | } |
1181 | } | 1187 | } |
1182 | 1188 | ||
1183 | /* resend IGMP joins since all were sent on curr_active_slave */ | 1189 | /* resend IGMP joins since active slave has changed or |
1184 | if (bond->params.mode == BOND_MODE_ROUNDROBIN) { | 1190 | * all were sent on curr_active_slave. |
1185 | bond_resend_igmp_join_requests(bond); | 1191 | * resend only if bond is brought up with the affected |
1192 | * bonding modes and the retransmission is enabled */ | ||
1193 | if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && | ||
1194 | ((USES_PRIMARY(bond->params.mode) && new_active) || | ||
1195 | bond->params.mode == BOND_MODE_ROUNDROBIN)) { | ||
1196 | bond->igmp_retrans = bond->params.resend_igmp; | ||
1197 | queue_delayed_work(bond->wq, &bond->mcast_work, 0); | ||
1186 | } | 1198 | } |
1187 | } | 1199 | } |
1188 | 1200 | ||
@@ -1274,58 +1286,104 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave) | |||
1274 | } | 1286 | } |
1275 | 1287 | ||
1276 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1288 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1277 | /* | 1289 | static inline int slave_enable_netpoll(struct slave *slave) |
1278 | * You must hold read lock on bond->lock before calling this. | ||
1279 | */ | ||
1280 | static bool slaves_support_netpoll(struct net_device *bond_dev) | ||
1281 | { | 1290 | { |
1282 | struct bonding *bond = netdev_priv(bond_dev); | 1291 | struct netpoll *np; |
1283 | struct slave *slave; | 1292 | int err = 0; |
1284 | int i = 0; | ||
1285 | bool ret = true; | ||
1286 | 1293 | ||
1287 | bond_for_each_slave(bond, slave, i) { | 1294 | np = kzalloc(sizeof(*np), GFP_KERNEL); |
1288 | if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) || | 1295 | err = -ENOMEM; |
1289 | !slave->dev->netdev_ops->ndo_poll_controller) | 1296 | if (!np) |
1290 | ret = false; | 1297 | goto out; |
1298 | |||
1299 | np->dev = slave->dev; | ||
1300 | strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ); | ||
1301 | err = __netpoll_setup(np); | ||
1302 | if (err) { | ||
1303 | kfree(np); | ||
1304 | goto out; | ||
1291 | } | 1305 | } |
1292 | return i != 0 && ret; | 1306 | slave->np = np; |
1307 | out: | ||
1308 | return err; | ||
1309 | } | ||
1310 | static inline void slave_disable_netpoll(struct slave *slave) | ||
1311 | { | ||
1312 | struct netpoll *np = slave->np; | ||
1313 | |||
1314 | if (!np) | ||
1315 | return; | ||
1316 | |||
1317 | slave->np = NULL; | ||
1318 | synchronize_rcu_bh(); | ||
1319 | __netpoll_cleanup(np); | ||
1320 | kfree(np); | ||
1321 | } | ||
1322 | static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) | ||
1323 | { | ||
1324 | if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL) | ||
1325 | return false; | ||
1326 | if (!slave_dev->netdev_ops->ndo_poll_controller) | ||
1327 | return false; | ||
1328 | return true; | ||
1293 | } | 1329 | } |
1294 | 1330 | ||
1295 | static void bond_poll_controller(struct net_device *bond_dev) | 1331 | static void bond_poll_controller(struct net_device *bond_dev) |
1296 | { | 1332 | { |
1297 | struct net_device *dev = bond_dev->npinfo->netpoll->real_dev; | ||
1298 | if (dev != bond_dev) | ||
1299 | netpoll_poll_dev(dev); | ||
1300 | } | 1333 | } |
1301 | 1334 | ||
1335 | static void __bond_netpoll_cleanup(struct bonding *bond) | ||
1336 | { | ||
1337 | struct slave *slave; | ||
1338 | int i; | ||
1339 | |||
1340 | bond_for_each_slave(bond, slave, i) | ||
1341 | if (IS_UP(slave->dev)) | ||
1342 | slave_disable_netpoll(slave); | ||
1343 | } | ||
1302 | static void bond_netpoll_cleanup(struct net_device *bond_dev) | 1344 | static void bond_netpoll_cleanup(struct net_device *bond_dev) |
1303 | { | 1345 | { |
1304 | struct bonding *bond = netdev_priv(bond_dev); | 1346 | struct bonding *bond = netdev_priv(bond_dev); |
1347 | |||
1348 | read_lock(&bond->lock); | ||
1349 | __bond_netpoll_cleanup(bond); | ||
1350 | read_unlock(&bond->lock); | ||
1351 | } | ||
1352 | |||
1353 | static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) | ||
1354 | { | ||
1355 | struct bonding *bond = netdev_priv(dev); | ||
1305 | struct slave *slave; | 1356 | struct slave *slave; |
1306 | const struct net_device_ops *ops; | 1357 | int i, err = 0; |
1307 | int i; | ||
1308 | 1358 | ||
1309 | read_lock(&bond->lock); | 1359 | read_lock(&bond->lock); |
1310 | bond_dev->npinfo = NULL; | ||
1311 | bond_for_each_slave(bond, slave, i) { | 1360 | bond_for_each_slave(bond, slave, i) { |
1312 | if (slave->dev) { | 1361 | err = slave_enable_netpoll(slave); |
1313 | ops = slave->dev->netdev_ops; | 1362 | if (err) { |
1314 | if (ops->ndo_netpoll_cleanup) | 1363 | __bond_netpoll_cleanup(bond); |
1315 | ops->ndo_netpoll_cleanup(slave->dev); | 1364 | break; |
1316 | else | ||
1317 | slave->dev->npinfo = NULL; | ||
1318 | } | 1365 | } |
1319 | } | 1366 | } |
1320 | read_unlock(&bond->lock); | 1367 | read_unlock(&bond->lock); |
1368 | return err; | ||
1321 | } | 1369 | } |
1322 | 1370 | ||
1323 | #else | 1371 | static struct netpoll_info *bond_netpoll_info(struct bonding *bond) |
1372 | { | ||
1373 | return bond->dev->npinfo; | ||
1374 | } | ||
1324 | 1375 | ||
1376 | #else | ||
1377 | static inline int slave_enable_netpoll(struct slave *slave) | ||
1378 | { | ||
1379 | return 0; | ||
1380 | } | ||
1381 | static inline void slave_disable_netpoll(struct slave *slave) | ||
1382 | { | ||
1383 | } | ||
1325 | static void bond_netpoll_cleanup(struct net_device *bond_dev) | 1384 | static void bond_netpoll_cleanup(struct net_device *bond_dev) |
1326 | { | 1385 | { |
1327 | } | 1386 | } |
1328 | |||
1329 | #endif | 1387 | #endif |
1330 | 1388 | ||
1331 | /*---------------------------------- IOCTL ----------------------------------*/ | 1389 | /*---------------------------------- IOCTL ----------------------------------*/ |
@@ -1340,52 +1398,68 @@ static int bond_sethwaddr(struct net_device *bond_dev, | |||
1340 | return 0; | 1398 | return 0; |
1341 | } | 1399 | } |
1342 | 1400 | ||
1343 | #define BOND_VLAN_FEATURES \ | 1401 | static u32 bond_fix_features(struct net_device *dev, u32 features) |
1344 | (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \ | ||
1345 | NETIF_F_HW_VLAN_FILTER) | ||
1346 | |||
1347 | /* | ||
1348 | * Compute the common dev->feature set available to all slaves. Some | ||
1349 | * feature bits are managed elsewhere, so preserve those feature bits | ||
1350 | * on the master device. | ||
1351 | */ | ||
1352 | static int bond_compute_features(struct bonding *bond) | ||
1353 | { | 1402 | { |
1354 | struct slave *slave; | 1403 | struct slave *slave; |
1355 | struct net_device *bond_dev = bond->dev; | 1404 | struct bonding *bond = netdev_priv(dev); |
1356 | unsigned long features = bond_dev->features; | 1405 | u32 mask; |
1357 | unsigned long vlan_features = 0; | ||
1358 | unsigned short max_hard_header_len = max((u16)ETH_HLEN, | ||
1359 | bond_dev->hard_header_len); | ||
1360 | int i; | 1406 | int i; |
1361 | 1407 | ||
1362 | features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); | 1408 | read_lock(&bond->lock); |
1363 | features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; | ||
1364 | 1409 | ||
1365 | if (!bond->first_slave) | 1410 | if (!bond->first_slave) { |
1366 | goto done; | 1411 | /* Disable adding VLANs to empty bond. But why? --mq */ |
1412 | features |= NETIF_F_VLAN_CHALLENGED; | ||
1413 | goto out; | ||
1414 | } | ||
1367 | 1415 | ||
1416 | mask = features; | ||
1368 | features &= ~NETIF_F_ONE_FOR_ALL; | 1417 | features &= ~NETIF_F_ONE_FOR_ALL; |
1418 | features |= NETIF_F_ALL_FOR_ALL; | ||
1369 | 1419 | ||
1370 | vlan_features = bond->first_slave->dev->vlan_features; | ||
1371 | bond_for_each_slave(bond, slave, i) { | 1420 | bond_for_each_slave(bond, slave, i) { |
1372 | features = netdev_increment_features(features, | 1421 | features = netdev_increment_features(features, |
1373 | slave->dev->features, | 1422 | slave->dev->features, |
1374 | NETIF_F_ONE_FOR_ALL); | 1423 | mask); |
1424 | } | ||
1425 | |||
1426 | out: | ||
1427 | read_unlock(&bond->lock); | ||
1428 | return features; | ||
1429 | } | ||
1430 | |||
1431 | #define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ | ||
1432 | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ | ||
1433 | NETIF_F_HIGHDMA | NETIF_F_LRO) | ||
1434 | |||
1435 | static void bond_compute_features(struct bonding *bond) | ||
1436 | { | ||
1437 | struct slave *slave; | ||
1438 | struct net_device *bond_dev = bond->dev; | ||
1439 | u32 vlan_features = BOND_VLAN_FEATURES; | ||
1440 | unsigned short max_hard_header_len = ETH_HLEN; | ||
1441 | int i; | ||
1442 | |||
1443 | read_lock(&bond->lock); | ||
1444 | |||
1445 | if (!bond->first_slave) | ||
1446 | goto done; | ||
1447 | |||
1448 | bond_for_each_slave(bond, slave, i) { | ||
1375 | vlan_features = netdev_increment_features(vlan_features, | 1449 | vlan_features = netdev_increment_features(vlan_features, |
1376 | slave->dev->vlan_features, | 1450 | slave->dev->vlan_features, BOND_VLAN_FEATURES); |
1377 | NETIF_F_ONE_FOR_ALL); | 1451 | |
1378 | if (slave->dev->hard_header_len > max_hard_header_len) | 1452 | if (slave->dev->hard_header_len > max_hard_header_len) |
1379 | max_hard_header_len = slave->dev->hard_header_len; | 1453 | max_hard_header_len = slave->dev->hard_header_len; |
1380 | } | 1454 | } |
1381 | 1455 | ||
1382 | done: | 1456 | done: |
1383 | features |= (bond_dev->features & BOND_VLAN_FEATURES); | 1457 | bond_dev->vlan_features = vlan_features; |
1384 | bond_dev->features = netdev_fix_features(features, NULL); | ||
1385 | bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); | ||
1386 | bond_dev->hard_header_len = max_hard_header_len; | 1458 | bond_dev->hard_header_len = max_hard_header_len; |
1387 | 1459 | ||
1388 | return 0; | 1460 | read_unlock(&bond->lock); |
1461 | |||
1462 | netdev_change_features(bond_dev); | ||
1389 | } | 1463 | } |
1390 | 1464 | ||
1391 | static void bond_setup_by_slave(struct net_device *bond_dev, | 1465 | static void bond_setup_by_slave(struct net_device *bond_dev, |
@@ -1404,6 +1478,71 @@ static void bond_setup_by_slave(struct net_device *bond_dev, | |||
1404 | bond->setup_by_slave = 1; | 1478 | bond->setup_by_slave = 1; |
1405 | } | 1479 | } |
1406 | 1480 | ||
1481 | /* On bonding slaves other than the currently active slave, suppress | ||
1482 | * duplicates except for alb non-mcast/bcast. | ||
1483 | */ | ||
1484 | static bool bond_should_deliver_exact_match(struct sk_buff *skb, | ||
1485 | struct slave *slave, | ||
1486 | struct bonding *bond) | ||
1487 | { | ||
1488 | if (bond_is_slave_inactive(slave)) { | ||
1489 | if (bond->params.mode == BOND_MODE_ALB && | ||
1490 | skb->pkt_type != PACKET_BROADCAST && | ||
1491 | skb->pkt_type != PACKET_MULTICAST) | ||
1492 | return false; | ||
1493 | return true; | ||
1494 | } | ||
1495 | return false; | ||
1496 | } | ||
1497 | |||
1498 | static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | ||
1499 | { | ||
1500 | struct sk_buff *skb = *pskb; | ||
1501 | struct slave *slave; | ||
1502 | struct bonding *bond; | ||
1503 | |||
1504 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
1505 | if (unlikely(!skb)) | ||
1506 | return RX_HANDLER_CONSUMED; | ||
1507 | |||
1508 | *pskb = skb; | ||
1509 | |||
1510 | slave = bond_slave_get_rcu(skb->dev); | ||
1511 | bond = slave->bond; | ||
1512 | |||
1513 | if (bond->params.arp_interval) | ||
1514 | slave->dev->last_rx = jiffies; | ||
1515 | |||
1516 | if (bond->recv_probe) { | ||
1517 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | ||
1518 | |||
1519 | if (likely(nskb)) { | ||
1520 | bond->recv_probe(nskb, bond, slave); | ||
1521 | dev_kfree_skb(nskb); | ||
1522 | } | ||
1523 | } | ||
1524 | |||
1525 | if (bond_should_deliver_exact_match(skb, slave, bond)) { | ||
1526 | return RX_HANDLER_EXACT; | ||
1527 | } | ||
1528 | |||
1529 | skb->dev = bond->dev; | ||
1530 | |||
1531 | if (bond->params.mode == BOND_MODE_ALB && | ||
1532 | bond->dev->priv_flags & IFF_BRIDGE_PORT && | ||
1533 | skb->pkt_type == PACKET_HOST) { | ||
1534 | |||
1535 | if (unlikely(skb_cow_head(skb, | ||
1536 | skb->data - skb_mac_header(skb)))) { | ||
1537 | kfree_skb(skb); | ||
1538 | return RX_HANDLER_CONSUMED; | ||
1539 | } | ||
1540 | memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); | ||
1541 | } | ||
1542 | |||
1543 | return RX_HANDLER_ANOTHER; | ||
1544 | } | ||
1545 | |||
1407 | /* enslave device <slave> to bond device <master> */ | 1546 | /* enslave device <slave> to bond device <master> */ |
1408 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | 1547 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) |
1409 | { | 1548 | { |
@@ -1413,7 +1552,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1413 | struct netdev_hw_addr *ha; | 1552 | struct netdev_hw_addr *ha; |
1414 | struct sockaddr addr; | 1553 | struct sockaddr addr; |
1415 | int link_reporting; | 1554 | int link_reporting; |
1416 | int old_features = bond_dev->features; | ||
1417 | int res = 0; | 1555 | int res = 0; |
1418 | 1556 | ||
1419 | if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && | 1557 | if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && |
@@ -1422,12 +1560,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1422 | bond_dev->name, slave_dev->name); | 1560 | bond_dev->name, slave_dev->name); |
1423 | } | 1561 | } |
1424 | 1562 | ||
1425 | /* bond must be initialized by bond_open() before enslaving */ | ||
1426 | if (!(bond_dev->flags & IFF_UP)) { | ||
1427 | pr_warning("%s: master_dev is not up in bond_enslave\n", | ||
1428 | bond_dev->name); | ||
1429 | } | ||
1430 | |||
1431 | /* already enslaved */ | 1563 | /* already enslaved */ |
1432 | if (slave_dev->flags & IFF_SLAVE) { | 1564 | if (slave_dev->flags & IFF_SLAVE) { |
1433 | pr_debug("Error, Device was already enslaved\n"); | 1565 | pr_debug("Error, Device was already enslaved\n"); |
@@ -1446,16 +1578,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1446 | pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", | 1578 | pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", |
1447 | bond_dev->name, slave_dev->name, | 1579 | bond_dev->name, slave_dev->name, |
1448 | slave_dev->name, bond_dev->name); | 1580 | slave_dev->name, bond_dev->name); |
1449 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | ||
1450 | } | 1581 | } |
1451 | } else { | 1582 | } else { |
1452 | pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); | 1583 | pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); |
1453 | if (bond->slave_cnt == 0) { | ||
1454 | /* First slave, and it is not VLAN challenged, | ||
1455 | * so remove the block of adding VLANs over the bond. | ||
1456 | */ | ||
1457 | bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; | ||
1458 | } | ||
1459 | } | 1584 | } |
1460 | 1585 | ||
1461 | /* | 1586 | /* |
@@ -1527,9 +1652,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1527 | } | 1652 | } |
1528 | } | 1653 | } |
1529 | 1654 | ||
1655 | call_netdevice_notifiers(NETDEV_JOIN, slave_dev); | ||
1656 | |||
1530 | /* If this is the first slave, then we need to set the master's hardware | 1657 | /* If this is the first slave, then we need to set the master's hardware |
1531 | * address to be the same as the slave's. */ | 1658 | * address to be the same as the slave's. */ |
1532 | if (bond->slave_cnt == 0) | 1659 | if (is_zero_ether_addr(bond->dev->dev_addr)) |
1533 | memcpy(bond->dev->dev_addr, slave_dev->dev_addr, | 1660 | memcpy(bond->dev->dev_addr, slave_dev->dev_addr, |
1534 | slave_dev->addr_len); | 1661 | slave_dev->addr_len); |
1535 | 1662 | ||
@@ -1575,11 +1702,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1575 | } | 1702 | } |
1576 | } | 1703 | } |
1577 | 1704 | ||
1578 | res = netdev_set_master(slave_dev, bond_dev); | 1705 | res = netdev_set_bond_master(slave_dev, bond_dev); |
1579 | if (res) { | 1706 | if (res) { |
1580 | pr_debug("Error %d calling netdev_set_master\n", res); | 1707 | pr_debug("Error %d calling netdev_set_bond_master\n", res); |
1581 | goto err_restore_mac; | 1708 | goto err_restore_mac; |
1582 | } | 1709 | } |
1710 | |||
1583 | /* open the slave since the application closed it */ | 1711 | /* open the slave since the application closed it */ |
1584 | res = dev_open(slave_dev); | 1712 | res = dev_open(slave_dev); |
1585 | if (res) { | 1713 | if (res) { |
@@ -1587,6 +1715,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1587 | goto err_unset_master; | 1715 | goto err_unset_master; |
1588 | } | 1716 | } |
1589 | 1717 | ||
1718 | new_slave->bond = bond; | ||
1590 | new_slave->dev = slave_dev; | 1719 | new_slave->dev = slave_dev; |
1591 | slave_dev->priv_flags |= IFF_BONDING; | 1720 | slave_dev->priv_flags |= IFF_BONDING; |
1592 | 1721 | ||
@@ -1642,10 +1771,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1642 | new_slave->delay = 0; | 1771 | new_slave->delay = 0; |
1643 | new_slave->link_failure_count = 0; | 1772 | new_slave->link_failure_count = 0; |
1644 | 1773 | ||
1645 | bond_compute_features(bond); | ||
1646 | |||
1647 | write_unlock_bh(&bond->lock); | 1774 | write_unlock_bh(&bond->lock); |
1648 | 1775 | ||
1776 | bond_compute_features(bond); | ||
1777 | |||
1649 | read_lock(&bond->lock); | 1778 | read_lock(&bond->lock); |
1650 | 1779 | ||
1651 | new_slave->last_arp_rx = jiffies; | 1780 | new_slave->last_arp_rx = jiffies; |
@@ -1738,7 +1867,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1738 | break; | 1867 | break; |
1739 | case BOND_MODE_TLB: | 1868 | case BOND_MODE_TLB: |
1740 | case BOND_MODE_ALB: | 1869 | case BOND_MODE_ALB: |
1741 | new_slave->state = BOND_STATE_ACTIVE; | 1870 | bond_set_active_slave(new_slave); |
1742 | bond_set_slave_inactive_flags(new_slave); | 1871 | bond_set_slave_inactive_flags(new_slave); |
1743 | bond_select_active_slave(bond); | 1872 | bond_select_active_slave(bond); |
1744 | break; | 1873 | break; |
@@ -1746,7 +1875,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1746 | pr_debug("This slave is always active in trunk mode\n"); | 1875 | pr_debug("This slave is always active in trunk mode\n"); |
1747 | 1876 | ||
1748 | /* always active in trunk mode */ | 1877 | /* always active in trunk mode */ |
1749 | new_slave->state = BOND_STATE_ACTIVE; | 1878 | bond_set_active_slave(new_slave); |
1750 | 1879 | ||
1751 | /* In trunking mode there is little meaning to curr_active_slave | 1880 | /* In trunking mode there is little meaning to curr_active_slave |
1752 | * anyway (it holds no special properties of the bond device), | 1881 | * anyway (it holds no special properties of the bond device), |
@@ -1763,45 +1892,49 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1763 | bond_set_carrier(bond); | 1892 | bond_set_carrier(bond); |
1764 | 1893 | ||
1765 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1894 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1766 | /* | 1895 | slave_dev->npinfo = bond_netpoll_info(bond); |
1767 | * Netpoll and bonding is broken, make sure it is not initialized | 1896 | if (slave_dev->npinfo) { |
1768 | * until it is fixed. | 1897 | if (slave_enable_netpoll(new_slave)) { |
1769 | */ | 1898 | read_unlock(&bond->lock); |
1770 | if (disable_netpoll) { | 1899 | pr_info("Error, %s: master_dev is using netpoll, " |
1771 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | 1900 | "but new slave device does not support netpoll.\n", |
1772 | } else { | 1901 | bond_dev->name); |
1773 | if (slaves_support_netpoll(bond_dev)) { | 1902 | res = -EBUSY; |
1774 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | 1903 | goto err_close; |
1775 | if (bond_dev->npinfo) | ||
1776 | slave_dev->npinfo = bond_dev->npinfo; | ||
1777 | } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { | ||
1778 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
1779 | pr_info("New slave device %s does not support netpoll\n", | ||
1780 | slave_dev->name); | ||
1781 | pr_info("Disabling netpoll support for %s\n", bond_dev->name); | ||
1782 | } | 1904 | } |
1783 | } | 1905 | } |
1784 | #endif | 1906 | #endif |
1907 | |||
1785 | read_unlock(&bond->lock); | 1908 | read_unlock(&bond->lock); |
1786 | 1909 | ||
1787 | res = bond_create_slave_symlinks(bond_dev, slave_dev); | 1910 | res = bond_create_slave_symlinks(bond_dev, slave_dev); |
1788 | if (res) | 1911 | if (res) |
1789 | goto err_close; | 1912 | goto err_close; |
1790 | 1913 | ||
1914 | res = netdev_rx_handler_register(slave_dev, bond_handle_frame, | ||
1915 | new_slave); | ||
1916 | if (res) { | ||
1917 | pr_debug("Error %d calling netdev_rx_handler_register\n", res); | ||
1918 | goto err_dest_symlinks; | ||
1919 | } | ||
1920 | |||
1791 | pr_info("%s: enslaving %s as a%s interface with a%s link.\n", | 1921 | pr_info("%s: enslaving %s as a%s interface with a%s link.\n", |
1792 | bond_dev->name, slave_dev->name, | 1922 | bond_dev->name, slave_dev->name, |
1793 | new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup", | 1923 | bond_is_active_slave(new_slave) ? "n active" : " backup", |
1794 | new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); | 1924 | new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); |
1795 | 1925 | ||
1796 | /* enslave is successful */ | 1926 | /* enslave is successful */ |
1797 | return 0; | 1927 | return 0; |
1798 | 1928 | ||
1799 | /* Undo stages on error */ | 1929 | /* Undo stages on error */ |
1930 | err_dest_symlinks: | ||
1931 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | ||
1932 | |||
1800 | err_close: | 1933 | err_close: |
1801 | dev_close(slave_dev); | 1934 | dev_close(slave_dev); |
1802 | 1935 | ||
1803 | err_unset_master: | 1936 | err_unset_master: |
1804 | netdev_set_master(slave_dev, NULL); | 1937 | netdev_set_bond_master(slave_dev, NULL); |
1805 | 1938 | ||
1806 | err_restore_mac: | 1939 | err_restore_mac: |
1807 | if (!bond->params.fail_over_mac) { | 1940 | if (!bond->params.fail_over_mac) { |
@@ -1821,7 +1954,7 @@ err_free: | |||
1821 | kfree(new_slave); | 1954 | kfree(new_slave); |
1822 | 1955 | ||
1823 | err_undo_flags: | 1956 | err_undo_flags: |
1824 | bond_dev->features = old_features; | 1957 | bond_compute_features(bond); |
1825 | 1958 | ||
1826 | return res; | 1959 | return res; |
1827 | } | 1960 | } |
@@ -1842,6 +1975,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1842 | struct bonding *bond = netdev_priv(bond_dev); | 1975 | struct bonding *bond = netdev_priv(bond_dev); |
1843 | struct slave *slave, *oldcurrent; | 1976 | struct slave *slave, *oldcurrent; |
1844 | struct sockaddr addr; | 1977 | struct sockaddr addr; |
1978 | u32 old_features = bond_dev->features; | ||
1845 | 1979 | ||
1846 | /* slave is not a slave or master is not master of this slave */ | 1980 | /* slave is not a slave or master is not master of this slave */ |
1847 | if (!(slave_dev->flags & IFF_SLAVE) || | 1981 | if (!(slave_dev->flags & IFF_SLAVE) || |
@@ -1851,7 +1985,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1851 | return -EINVAL; | 1985 | return -EINVAL; |
1852 | } | 1986 | } |
1853 | 1987 | ||
1854 | netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE); | 1988 | block_netpoll_tx(); |
1989 | netdev_bonding_change(bond_dev, NETDEV_RELEASE); | ||
1855 | write_lock_bh(&bond->lock); | 1990 | write_lock_bh(&bond->lock); |
1856 | 1991 | ||
1857 | slave = bond_get_slave_by_dev(bond, slave_dev); | 1992 | slave = bond_get_slave_by_dev(bond, slave_dev); |
@@ -1860,9 +1995,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1860 | pr_info("%s: %s not enslaved\n", | 1995 | pr_info("%s: %s not enslaved\n", |
1861 | bond_dev->name, slave_dev->name); | 1996 | bond_dev->name, slave_dev->name); |
1862 | write_unlock_bh(&bond->lock); | 1997 | write_unlock_bh(&bond->lock); |
1998 | unblock_netpoll_tx(); | ||
1863 | return -EINVAL; | 1999 | return -EINVAL; |
1864 | } | 2000 | } |
1865 | 2001 | ||
2002 | /* unregister rx_handler early so bond_handle_frame wouldn't be called | ||
2003 | * for this slave anymore. | ||
2004 | */ | ||
2005 | netdev_rx_handler_unregister(slave_dev); | ||
2006 | write_unlock_bh(&bond->lock); | ||
2007 | synchronize_net(); | ||
2008 | write_lock_bh(&bond->lock); | ||
2009 | |||
1866 | if (!bond->params.fail_over_mac) { | 2010 | if (!bond->params.fail_over_mac) { |
1867 | if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && | 2011 | if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && |
1868 | bond->slave_cnt > 1) | 2012 | bond->slave_cnt > 1) |
@@ -1882,7 +2026,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1882 | 2026 | ||
1883 | pr_info("%s: releasing %s interface %s\n", | 2027 | pr_info("%s: releasing %s interface %s\n", |
1884 | bond_dev->name, | 2028 | bond_dev->name, |
1885 | (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup", | 2029 | bond_is_active_slave(slave) ? "active" : "backup", |
1886 | slave_dev->name); | 2030 | slave_dev->name); |
1887 | 2031 | ||
1888 | oldcurrent = bond->curr_active_slave; | 2032 | oldcurrent = bond->curr_active_slave; |
@@ -1892,8 +2036,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1892 | /* release the slave from its bond */ | 2036 | /* release the slave from its bond */ |
1893 | bond_detach_slave(bond, slave); | 2037 | bond_detach_slave(bond, slave); |
1894 | 2038 | ||
1895 | bond_compute_features(bond); | ||
1896 | |||
1897 | if (bond->primary_slave == slave) | 2039 | if (bond->primary_slave == slave) |
1898 | bond->primary_slave = NULL; | 2040 | bond->primary_slave = NULL; |
1899 | 2041 | ||
@@ -1937,22 +2079,22 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1937 | */ | 2079 | */ |
1938 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); | 2080 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); |
1939 | 2081 | ||
1940 | if (!bond->vlgrp) { | 2082 | if (bond->vlgrp) { |
1941 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | ||
1942 | } else { | ||
1943 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", | 2083 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", |
1944 | bond_dev->name, bond_dev->name); | 2084 | bond_dev->name, bond_dev->name); |
1945 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", | 2085 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", |
1946 | bond_dev->name); | 2086 | bond_dev->name); |
1947 | } | 2087 | } |
1948 | } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) && | ||
1949 | !bond_has_challenged_slaves(bond)) { | ||
1950 | pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n", | ||
1951 | bond_dev->name, slave_dev->name, bond_dev->name); | ||
1952 | bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; | ||
1953 | } | 2088 | } |
1954 | 2089 | ||
1955 | write_unlock_bh(&bond->lock); | 2090 | write_unlock_bh(&bond->lock); |
2091 | unblock_netpoll_tx(); | ||
2092 | |||
2093 | bond_compute_features(bond); | ||
2094 | if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && | ||
2095 | (old_features & NETIF_F_VLAN_CHALLENGED)) | ||
2096 | pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n", | ||
2097 | bond_dev->name, slave_dev->name, bond_dev->name); | ||
1956 | 2098 | ||
1957 | /* must do this from outside any spinlocks */ | 2099 | /* must do this from outside any spinlocks */ |
1958 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | 2100 | bond_destroy_slave_symlinks(bond_dev, slave_dev); |
@@ -1978,21 +2120,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1978 | netif_addr_unlock_bh(bond_dev); | 2120 | netif_addr_unlock_bh(bond_dev); |
1979 | } | 2121 | } |
1980 | 2122 | ||
1981 | netdev_set_master(slave_dev, NULL); | 2123 | netdev_set_bond_master(slave_dev, NULL); |
1982 | 2124 | ||
1983 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2125 | slave_disable_netpoll(slave); |
1984 | read_lock_bh(&bond->lock); | ||
1985 | |||
1986 | /* Make sure netpoll over stays disabled until fixed. */ | ||
1987 | if (!disable_netpoll) | ||
1988 | if (slaves_support_netpoll(bond_dev)) | ||
1989 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
1990 | read_unlock_bh(&bond->lock); | ||
1991 | if (slave_dev->netdev_ops->ndo_netpoll_cleanup) | ||
1992 | slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); | ||
1993 | else | ||
1994 | slave_dev->npinfo = NULL; | ||
1995 | #endif | ||
1996 | 2126 | ||
1997 | /* close slave before restoring its mac address */ | 2127 | /* close slave before restoring its mac address */ |
1998 | dev_close(slave_dev); | 2128 | dev_close(slave_dev); |
@@ -2006,9 +2136,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
2006 | 2136 | ||
2007 | dev_set_mtu(slave_dev, slave->original_mtu); | 2137 | dev_set_mtu(slave_dev, slave->original_mtu); |
2008 | 2138 | ||
2009 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | | 2139 | slave_dev->priv_flags &= ~IFF_BONDING; |
2010 | IFF_SLAVE_INACTIVE | IFF_BONDING | | ||
2011 | IFF_SLAVE_NEEDARP); | ||
2012 | 2140 | ||
2013 | kfree(slave); | 2141 | kfree(slave); |
2014 | 2142 | ||
@@ -2016,17 +2144,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
2016 | } | 2144 | } |
2017 | 2145 | ||
2018 | /* | 2146 | /* |
2019 | * First release a slave and than destroy the bond if no more slaves are left. | 2147 | * First release a slave and then destroy the bond if no more slaves are left. |
2020 | * Must be under rtnl_lock when this function is called. | 2148 | * Must be under rtnl_lock when this function is called. |
2021 | */ | 2149 | */ |
2022 | int bond_release_and_destroy(struct net_device *bond_dev, | 2150 | static int bond_release_and_destroy(struct net_device *bond_dev, |
2023 | struct net_device *slave_dev) | 2151 | struct net_device *slave_dev) |
2024 | { | 2152 | { |
2025 | struct bonding *bond = netdev_priv(bond_dev); | 2153 | struct bonding *bond = netdev_priv(bond_dev); |
2026 | int ret; | 2154 | int ret; |
2027 | 2155 | ||
2028 | ret = bond_release(bond_dev, slave_dev); | 2156 | ret = bond_release(bond_dev, slave_dev); |
2029 | if ((ret == 0) && (bond->slave_cnt == 0)) { | 2157 | if ((ret == 0) && (bond->slave_cnt == 0)) { |
2158 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
2030 | pr_info("%s: destroying bond %s.\n", | 2159 | pr_info("%s: destroying bond %s.\n", |
2031 | bond_dev->name, bond_dev->name); | 2160 | bond_dev->name, bond_dev->name); |
2032 | unregister_netdevice(bond_dev); | 2161 | unregister_netdevice(bond_dev); |
@@ -2071,6 +2200,12 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2071 | */ | 2200 | */ |
2072 | write_unlock_bh(&bond->lock); | 2201 | write_unlock_bh(&bond->lock); |
2073 | 2202 | ||
2203 | /* unregister rx_handler early so bond_handle_frame wouldn't | ||
2204 | * be called for this slave anymore. | ||
2205 | */ | ||
2206 | netdev_rx_handler_unregister(slave_dev); | ||
2207 | synchronize_net(); | ||
2208 | |||
2074 | if (bond_is_lb(bond)) { | 2209 | if (bond_is_lb(bond)) { |
2075 | /* must be called only after the slave | 2210 | /* must be called only after the slave |
2076 | * has been detached from the list | 2211 | * has been detached from the list |
@@ -2078,8 +2213,6 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2078 | bond_alb_deinit_slave(bond, slave); | 2213 | bond_alb_deinit_slave(bond, slave); |
2079 | } | 2214 | } |
2080 | 2215 | ||
2081 | bond_compute_features(bond); | ||
2082 | |||
2083 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | 2216 | bond_destroy_slave_symlinks(bond_dev, slave_dev); |
2084 | bond_del_vlans_from_slave(bond, slave_dev); | 2217 | bond_del_vlans_from_slave(bond, slave_dev); |
2085 | 2218 | ||
@@ -2102,7 +2235,9 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2102 | netif_addr_unlock_bh(bond_dev); | 2235 | netif_addr_unlock_bh(bond_dev); |
2103 | } | 2236 | } |
2104 | 2237 | ||
2105 | netdev_set_master(slave_dev, NULL); | 2238 | netdev_set_bond_master(slave_dev, NULL); |
2239 | |||
2240 | slave_disable_netpoll(slave); | ||
2106 | 2241 | ||
2107 | /* close slave before restoring its mac address */ | 2242 | /* close slave before restoring its mac address */ |
2108 | dev_close(slave_dev); | 2243 | dev_close(slave_dev); |
@@ -2114,9 +2249,6 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2114 | dev_set_mac_address(slave_dev, &addr); | 2249 | dev_set_mac_address(slave_dev, &addr); |
2115 | } | 2250 | } |
2116 | 2251 | ||
2117 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | | ||
2118 | IFF_SLAVE_INACTIVE); | ||
2119 | |||
2120 | kfree(slave); | 2252 | kfree(slave); |
2121 | 2253 | ||
2122 | /* re-acquire the lock before getting the next slave */ | 2254 | /* re-acquire the lock before getting the next slave */ |
@@ -2129,9 +2261,7 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2129 | */ | 2261 | */ |
2130 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); | 2262 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); |
2131 | 2263 | ||
2132 | if (!bond->vlgrp) { | 2264 | if (bond->vlgrp) { |
2133 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | ||
2134 | } else { | ||
2135 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", | 2265 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", |
2136 | bond_dev->name, bond_dev->name); | 2266 | bond_dev->name, bond_dev->name); |
2137 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", | 2267 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", |
@@ -2143,6 +2273,8 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2143 | out: | 2273 | out: |
2144 | write_unlock_bh(&bond->lock); | 2274 | write_unlock_bh(&bond->lock); |
2145 | 2275 | ||
2276 | bond_compute_features(bond); | ||
2277 | |||
2146 | return 0; | 2278 | return 0; |
2147 | } | 2279 | } |
2148 | 2280 | ||
@@ -2191,9 +2323,11 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi | |||
2191 | (old_active) && | 2323 | (old_active) && |
2192 | (new_active->link == BOND_LINK_UP) && | 2324 | (new_active->link == BOND_LINK_UP) && |
2193 | IS_UP(new_active->dev)) { | 2325 | IS_UP(new_active->dev)) { |
2326 | block_netpoll_tx(); | ||
2194 | write_lock_bh(&bond->curr_slave_lock); | 2327 | write_lock_bh(&bond->curr_slave_lock); |
2195 | bond_change_active_slave(bond, new_active); | 2328 | bond_change_active_slave(bond, new_active); |
2196 | write_unlock_bh(&bond->curr_slave_lock); | 2329 | write_unlock_bh(&bond->curr_slave_lock); |
2330 | unblock_netpoll_tx(); | ||
2197 | } else | 2331 | } else |
2198 | res = -EINVAL; | 2332 | res = -EINVAL; |
2199 | 2333 | ||
@@ -2229,7 +2363,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in | |||
2229 | res = 0; | 2363 | res = 0; |
2230 | strcpy(info->slave_name, slave->dev->name); | 2364 | strcpy(info->slave_name, slave->dev->name); |
2231 | info->link = slave->link; | 2365 | info->link = slave->link; |
2232 | info->state = slave->state; | 2366 | info->state = bond_slave_state(slave); |
2233 | info->link_failure_count = slave->link_failure_count; | 2367 | info->link_failure_count = slave->link_failure_count; |
2234 | break; | 2368 | break; |
2235 | } | 2369 | } |
@@ -2268,7 +2402,7 @@ static int bond_miimon_inspect(struct bonding *bond) | |||
2268 | bond->dev->name, | 2402 | bond->dev->name, |
2269 | (bond->params.mode == | 2403 | (bond->params.mode == |
2270 | BOND_MODE_ACTIVEBACKUP) ? | 2404 | BOND_MODE_ACTIVEBACKUP) ? |
2271 | ((slave->state == BOND_STATE_ACTIVE) ? | 2405 | (bond_is_active_slave(slave) ? |
2272 | "active " : "backup ") : "", | 2406 | "active " : "backup ") : "", |
2273 | slave->dev->name, | 2407 | slave->dev->name, |
2274 | bond->params.downdelay * bond->params.miimon); | 2408 | bond->params.downdelay * bond->params.miimon); |
@@ -2359,17 +2493,20 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2359 | 2493 | ||
2360 | if (bond->params.mode == BOND_MODE_8023AD) { | 2494 | if (bond->params.mode == BOND_MODE_8023AD) { |
2361 | /* prevent it from being the active one */ | 2495 | /* prevent it from being the active one */ |
2362 | slave->state = BOND_STATE_BACKUP; | 2496 | bond_set_backup_slave(slave); |
2363 | } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { | 2497 | } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { |
2364 | /* make it immediately active */ | 2498 | /* make it immediately active */ |
2365 | slave->state = BOND_STATE_ACTIVE; | 2499 | bond_set_active_slave(slave); |
2366 | } else if (slave != bond->primary_slave) { | 2500 | } else if (slave != bond->primary_slave) { |
2367 | /* prevent it from being the active one */ | 2501 | /* prevent it from being the active one */ |
2368 | slave->state = BOND_STATE_BACKUP; | 2502 | bond_set_backup_slave(slave); |
2369 | } | 2503 | } |
2370 | 2504 | ||
2371 | pr_info("%s: link status definitely up for interface %s.\n", | 2505 | bond_update_speed_duplex(slave); |
2372 | bond->dev->name, slave->dev->name); | 2506 | |
2507 | pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", | ||
2508 | bond->dev->name, slave->dev->name, | ||
2509 | slave->speed, slave->duplex ? "full" : "half"); | ||
2373 | 2510 | ||
2374 | /* notify ad that the link status has changed */ | 2511 | /* notify ad that the link status has changed */ |
2375 | if (bond->params.mode == BOND_MODE_8023AD) | 2512 | if (bond->params.mode == BOND_MODE_8023AD) |
@@ -2422,9 +2559,11 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2422 | 2559 | ||
2423 | do_failover: | 2560 | do_failover: |
2424 | ASSERT_RTNL(); | 2561 | ASSERT_RTNL(); |
2562 | block_netpoll_tx(); | ||
2425 | write_lock_bh(&bond->curr_slave_lock); | 2563 | write_lock_bh(&bond->curr_slave_lock); |
2426 | bond_select_active_slave(bond); | 2564 | bond_select_active_slave(bond); |
2427 | write_unlock_bh(&bond->curr_slave_lock); | 2565 | write_unlock_bh(&bond->curr_slave_lock); |
2566 | unblock_netpoll_tx(); | ||
2428 | } | 2567 | } |
2429 | 2568 | ||
2430 | bond_set_carrier(bond); | 2569 | bond_set_carrier(bond); |
@@ -2442,6 +2581,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2442 | { | 2581 | { |
2443 | struct bonding *bond = container_of(work, struct bonding, | 2582 | struct bonding *bond = container_of(work, struct bonding, |
2444 | mii_work.work); | 2583 | mii_work.work); |
2584 | bool should_notify_peers = false; | ||
2445 | 2585 | ||
2446 | read_lock(&bond->lock); | 2586 | read_lock(&bond->lock); |
2447 | if (bond->kill_timers) | 2587 | if (bond->kill_timers) |
@@ -2450,17 +2590,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2450 | if (bond->slave_cnt == 0) | 2590 | if (bond->slave_cnt == 0) |
2451 | goto re_arm; | 2591 | goto re_arm; |
2452 | 2592 | ||
2453 | if (bond->send_grat_arp) { | 2593 | should_notify_peers = bond_should_notify_peers(bond); |
2454 | read_lock(&bond->curr_slave_lock); | ||
2455 | bond_send_gratuitous_arp(bond); | ||
2456 | read_unlock(&bond->curr_slave_lock); | ||
2457 | } | ||
2458 | |||
2459 | if (bond->send_unsol_na) { | ||
2460 | read_lock(&bond->curr_slave_lock); | ||
2461 | bond_send_unsolicited_na(bond); | ||
2462 | read_unlock(&bond->curr_slave_lock); | ||
2463 | } | ||
2464 | 2594 | ||
2465 | if (bond_miimon_inspect(bond)) { | 2595 | if (bond_miimon_inspect(bond)) { |
2466 | read_unlock(&bond->lock); | 2596 | read_unlock(&bond->lock); |
@@ -2480,6 +2610,12 @@ re_arm: | |||
2480 | msecs_to_jiffies(bond->params.miimon)); | 2610 | msecs_to_jiffies(bond->params.miimon)); |
2481 | out: | 2611 | out: |
2482 | read_unlock(&bond->lock); | 2612 | read_unlock(&bond->lock); |
2613 | |||
2614 | if (should_notify_peers) { | ||
2615 | rtnl_lock(); | ||
2616 | netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); | ||
2617 | rtnl_unlock(); | ||
2618 | } | ||
2483 | } | 2619 | } |
2484 | 2620 | ||
2485 | static __be32 bond_glean_dev_ip(struct net_device *dev) | 2621 | static __be32 bond_glean_dev_ip(struct net_device *dev) |
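The send_grat_arp/send_unsol_na bookkeeping is replaced here by one decision, bond_should_notify_peers(), taken under the read lock and acted on only after that lock is dropped, because rtnl_lock() may sleep. A rough sketch of the shape of the pattern (demo_monitor() and demo_decide_notify() are placeholders, not bonding symbols):

static bool demo_decide_notify(struct bonding *bond)
{
	/* placeholder predicate; the real check is bond_should_notify_peers() */
	return bond->send_peer_notif > 0;
}

static void demo_monitor(struct bonding *bond)
{
	bool should_notify_peers;

	read_lock(&bond->lock);
	should_notify_peers = demo_decide_notify(bond);
	/* ... link-state inspection and commit work ... */
	read_unlock(&bond->lock);

	if (should_notify_peers) {
		rtnl_lock();	/* may sleep, so only after read_unlock() */
		netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
		rtnl_unlock();
	}
}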
@@ -2553,11 +2689,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ | |||
2553 | 2689 | ||
2554 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | 2690 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) |
2555 | { | 2691 | { |
2556 | int i, vlan_id, rv; | 2692 | int i, vlan_id; |
2557 | __be32 *targets = bond->params.arp_targets; | 2693 | __be32 *targets = bond->params.arp_targets; |
2558 | struct vlan_entry *vlan; | 2694 | struct vlan_entry *vlan; |
2559 | struct net_device *vlan_dev; | 2695 | struct net_device *vlan_dev; |
2560 | struct flowi fl; | ||
2561 | struct rtable *rt; | 2696 | struct rtable *rt; |
2562 | 2697 | ||
2563 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | 2698 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { |
@@ -2576,15 +2711,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2576 | * determine which VLAN interface would be used, so we | 2711 | * determine which VLAN interface would be used, so we |
2577 | * can tag the ARP with the proper VLAN tag. | 2712 | * can tag the ARP with the proper VLAN tag. |
2578 | */ | 2713 | */ |
2579 | memset(&fl, 0, sizeof(fl)); | 2714 | rt = ip_route_output(dev_net(bond->dev), targets[i], 0, |
2580 | fl.fl4_dst = targets[i]; | 2715 | RTO_ONLINK, 0); |
2581 | fl.fl4_tos = RTO_ONLINK; | 2716 | if (IS_ERR(rt)) { |
2582 | |||
2583 | rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl); | ||
2584 | if (rv) { | ||
2585 | if (net_ratelimit()) { | 2717 | if (net_ratelimit()) { |
2586 | pr_warning("%s: no route to arp_ip_target %pI4\n", | 2718 | pr_warning("%s: no route to arp_ip_target %pI4\n", |
2587 | bond->dev->name, &fl.fl4_dst); | 2719 | bond->dev->name, &targets[i]); |
2588 | } | 2720 | } |
2589 | continue; | 2721 | continue; |
2590 | } | 2722 | } |
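The routing lookup above drops the struct flowi plus ip_route_output_key() combination in favour of ip_route_output(), which returns the rtable directly and reports failure as an ERR_PTR value rather than through an int. A small sketch of the new calling convention (demo_check_route() is illustrative only):

static int demo_check_route(struct net *net, __be32 daddr)
{
	struct rtable *rt;

	/* saddr 0, tos RTO_ONLINK, oif 0: the same arguments used above */
	rt = ip_route_output(net, daddr, 0, RTO_ONLINK, 0);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* failure is encoded in the pointer */

	pr_debug("route to %pI4 via %s\n", &daddr,
		 rt->dst.dev ? rt->dst.dev->name : "NULL");
	ip_rt_put(rt);			/* drop the route reference when done */
	return 0;
}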
@@ -2620,51 +2752,13 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2620 | 2752 | ||
2621 | if (net_ratelimit()) { | 2753 | if (net_ratelimit()) { |
2622 | pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", | 2754 | pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", |
2623 | bond->dev->name, &fl.fl4_dst, | 2755 | bond->dev->name, &targets[i], |
2624 | rt->dst.dev ? rt->dst.dev->name : "NULL"); | 2756 | rt->dst.dev ? rt->dst.dev->name : "NULL"); |
2625 | } | 2757 | } |
2626 | ip_rt_put(rt); | 2758 | ip_rt_put(rt); |
2627 | } | 2759 | } |
2628 | } | 2760 | } |
2629 | 2761 | ||
2630 | /* | ||
2631 | * Kick out a gratuitous ARP for an IP on the bonding master plus one | ||
2632 | * for each VLAN above us. | ||
2633 | * | ||
2634 | * Caller must hold curr_slave_lock for read or better | ||
2635 | */ | ||
2636 | static void bond_send_gratuitous_arp(struct bonding *bond) | ||
2637 | { | ||
2638 | struct slave *slave = bond->curr_active_slave; | ||
2639 | struct vlan_entry *vlan; | ||
2640 | struct net_device *vlan_dev; | ||
2641 | |||
2642 | pr_debug("bond_send_grat_arp: bond %s slave %s\n", | ||
2643 | bond->dev->name, slave ? slave->dev->name : "NULL"); | ||
2644 | |||
2645 | if (!slave || !bond->send_grat_arp || | ||
2646 | test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) | ||
2647 | return; | ||
2648 | |||
2649 | bond->send_grat_arp--; | ||
2650 | |||
2651 | if (bond->master_ip) { | ||
2652 | bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip, | ||
2653 | bond->master_ip, 0); | ||
2654 | } | ||
2655 | |||
2656 | if (!bond->vlgrp) | ||
2657 | return; | ||
2658 | |||
2659 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | ||
2660 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); | ||
2661 | if (vlan->vlan_ip) { | ||
2662 | bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, | ||
2663 | vlan->vlan_ip, vlan->vlan_id); | ||
2664 | } | ||
2665 | } | ||
2666 | } | ||
2667 | |||
2668 | static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) | 2762 | static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) |
2669 | { | 2763 | { |
2670 | int i; | 2764 | int i; |
@@ -2682,44 +2776,26 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 | |||
2682 | } | 2776 | } |
2683 | } | 2777 | } |
2684 | 2778 | ||
2685 | static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 2779 | static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, |
2780 | struct slave *slave) | ||
2686 | { | 2781 | { |
2687 | struct arphdr *arp; | 2782 | struct arphdr *arp; |
2688 | struct slave *slave; | ||
2689 | struct bonding *bond; | ||
2690 | unsigned char *arp_ptr; | 2783 | unsigned char *arp_ptr; |
2691 | __be32 sip, tip; | 2784 | __be32 sip, tip; |
2692 | 2785 | ||
2693 | if (dev->priv_flags & IFF_802_1Q_VLAN) { | 2786 | if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) |
2694 | /* | 2787 | return; |
2695 | * When using VLANS and bonding, dev and oriv_dev may be | ||
2696 | * incorrect if the physical interface supports VLAN | ||
2697 | * acceleration. With this change ARP validation now | ||
2698 | * works for hosts only reachable on the VLAN interface. | ||
2699 | */ | ||
2700 | dev = vlan_dev_real_dev(dev); | ||
2701 | orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif); | ||
2702 | } | ||
2703 | |||
2704 | if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) | ||
2705 | goto out; | ||
2706 | 2788 | ||
2707 | bond = netdev_priv(dev); | ||
2708 | read_lock(&bond->lock); | 2789 | read_lock(&bond->lock); |
2709 | 2790 | ||
2710 | pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n", | 2791 | pr_debug("bond_arp_rcv: bond %s skb->dev %s\n", |
2711 | bond->dev->name, skb->dev ? skb->dev->name : "NULL", | 2792 | bond->dev->name, skb->dev->name); |
2712 | orig_dev ? orig_dev->name : "NULL"); | ||
2713 | 2793 | ||
2714 | slave = bond_get_slave_by_dev(bond, orig_dev); | 2794 | if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) |
2715 | if (!slave || !slave_do_arp_validate(bond, slave)) | ||
2716 | goto out_unlock; | ||
2717 | |||
2718 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) | ||
2719 | goto out_unlock; | 2795 | goto out_unlock; |
2720 | 2796 | ||
2721 | arp = arp_hdr(skb); | 2797 | arp = arp_hdr(skb); |
2722 | if (arp->ar_hln != dev->addr_len || | 2798 | if (arp->ar_hln != bond->dev->addr_len || |
2723 | skb->pkt_type == PACKET_OTHERHOST || | 2799 | skb->pkt_type == PACKET_OTHERHOST || |
2724 | skb->pkt_type == PACKET_LOOPBACK || | 2800 | skb->pkt_type == PACKET_LOOPBACK || |
2725 | arp->ar_hrd != htons(ARPHRD_ETHER) || | 2801 | arp->ar_hrd != htons(ARPHRD_ETHER) || |
@@ -2728,13 +2804,13 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2728 | goto out_unlock; | 2804 | goto out_unlock; |
2729 | 2805 | ||
2730 | arp_ptr = (unsigned char *)(arp + 1); | 2806 | arp_ptr = (unsigned char *)(arp + 1); |
2731 | arp_ptr += dev->addr_len; | 2807 | arp_ptr += bond->dev->addr_len; |
2732 | memcpy(&sip, arp_ptr, 4); | 2808 | memcpy(&sip, arp_ptr, 4); |
2733 | arp_ptr += 4 + dev->addr_len; | 2809 | arp_ptr += 4 + bond->dev->addr_len; |
2734 | memcpy(&tip, arp_ptr, 4); | 2810 | memcpy(&tip, arp_ptr, 4); |
2735 | 2811 | ||
2736 | pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", | 2812 | pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", |
2737 | bond->dev->name, slave->dev->name, slave->state, | 2813 | bond->dev->name, slave->dev->name, bond_slave_state(slave), |
2738 | bond->params.arp_validate, slave_do_arp_validate(bond, slave), | 2814 | bond->params.arp_validate, slave_do_arp_validate(bond, slave), |
2739 | &sip, &tip); | 2815 | &sip, &tip); |
2740 | 2816 | ||
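The arp_ptr arithmetic above steps through the ARP payload that immediately follows struct arphdr; for the Ethernet/IPv4 frames accepted by the earlier checks the layout is:

/*
 * Payload after struct arphdr (ar_hln == addr_len, ar_pln == 4):
 *
 *   +0                  sender hardware address   (addr_len bytes)
 *   +addr_len           sender IP address         (4 bytes)  -> sip
 *   +addr_len + 4       target hardware address   (addr_len bytes)
 *   +2*addr_len + 4     target IP address         (4 bytes)  -> tip
 *
 * hence the advance by addr_len before copying sip, and by
 * 4 + addr_len before copying tip.
 */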
@@ -2746,16 +2822,13 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2746 | * the active, through one switch, the router, then the other | 2822 | * the active, through one switch, the router, then the other |
2747 | * switch before reaching the backup. | 2823 | * switch before reaching the backup. |
2748 | */ | 2824 | */ |
2749 | if (slave->state == BOND_STATE_ACTIVE) | 2825 | if (bond_is_active_slave(slave)) |
2750 | bond_validate_arp(bond, slave, sip, tip); | 2826 | bond_validate_arp(bond, slave, sip, tip); |
2751 | else | 2827 | else |
2752 | bond_validate_arp(bond, slave, tip, sip); | 2828 | bond_validate_arp(bond, slave, tip, sip); |
2753 | 2829 | ||
2754 | out_unlock: | 2830 | out_unlock: |
2755 | read_unlock(&bond->lock); | 2831 | read_unlock(&bond->lock); |
2756 | out: | ||
2757 | dev_kfree_skb(skb); | ||
2758 | return NET_RX_SUCCESS; | ||
2759 | } | 2832 | } |
2760 | 2833 | ||
2761 | /* | 2834 | /* |
@@ -2808,7 +2881,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2808 | slave->dev->last_rx + delta_in_ticks)) { | 2881 | slave->dev->last_rx + delta_in_ticks)) { |
2809 | 2882 | ||
2810 | slave->link = BOND_LINK_UP; | 2883 | slave->link = BOND_LINK_UP; |
2811 | slave->state = BOND_STATE_ACTIVE; | 2884 | bond_set_active_slave(slave); |
2812 | 2885 | ||
2813 | /* primary_slave has no meaning in round-robin | 2886 | /* primary_slave has no meaning in round-robin |
2814 | * mode. the window of a slave being up and | 2887 | * mode. the window of a slave being up and |
@@ -2841,7 +2914,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2841 | slave->dev->last_rx + 2 * delta_in_ticks)) { | 2914 | slave->dev->last_rx + 2 * delta_in_ticks)) { |
2842 | 2915 | ||
2843 | slave->link = BOND_LINK_DOWN; | 2916 | slave->link = BOND_LINK_DOWN; |
2844 | slave->state = BOND_STATE_BACKUP; | 2917 | bond_set_backup_slave(slave); |
2845 | 2918 | ||
2846 | if (slave->link_failure_count < UINT_MAX) | 2919 | if (slave->link_failure_count < UINT_MAX) |
2847 | slave->link_failure_count++; | 2920 | slave->link_failure_count++; |
@@ -2867,11 +2940,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2867 | } | 2940 | } |
2868 | 2941 | ||
2869 | if (do_failover) { | 2942 | if (do_failover) { |
2943 | block_netpoll_tx(); | ||
2870 | write_lock_bh(&bond->curr_slave_lock); | 2944 | write_lock_bh(&bond->curr_slave_lock); |
2871 | 2945 | ||
2872 | bond_select_active_slave(bond); | 2946 | bond_select_active_slave(bond); |
2873 | 2947 | ||
2874 | write_unlock_bh(&bond->curr_slave_lock); | 2948 | write_unlock_bh(&bond->curr_slave_lock); |
2949 | unblock_netpoll_tx(); | ||
2875 | } | 2950 | } |
2876 | 2951 | ||
2877 | re_arm: | 2952 | re_arm: |
@@ -2933,7 +3008,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2933 | * gives each slave a chance to tx/rx traffic | 3008 | * gives each slave a chance to tx/rx traffic |
2934 | * before being taken out | 3009 | * before being taken out |
2935 | */ | 3010 | */ |
2936 | if (slave->state == BOND_STATE_BACKUP && | 3011 | if (!bond_is_active_slave(slave) && |
2937 | !bond->current_arp_slave && | 3012 | !bond->current_arp_slave && |
2938 | !time_in_range(jiffies, | 3013 | !time_in_range(jiffies, |
2939 | slave_last_rx(bond, slave) - delta_in_ticks, | 3014 | slave_last_rx(bond, slave) - delta_in_ticks, |
@@ -2950,7 +3025,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2950 | * the bond has an IP address) | 3025 | * the bond has an IP address) |
2951 | */ | 3026 | */ |
2952 | trans_start = dev_trans_start(slave->dev); | 3027 | trans_start = dev_trans_start(slave->dev); |
2953 | if ((slave->state == BOND_STATE_ACTIVE) && | 3028 | if (bond_is_active_slave(slave) && |
2954 | (!time_in_range(jiffies, | 3029 | (!time_in_range(jiffies, |
2955 | trans_start - delta_in_ticks, | 3030 | trans_start - delta_in_ticks, |
2956 | trans_start + 2 * delta_in_ticks) || | 3031 | trans_start + 2 * delta_in_ticks) || |
@@ -3030,9 +3105,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) | |||
3030 | 3105 | ||
3031 | do_failover: | 3106 | do_failover: |
3032 | ASSERT_RTNL(); | 3107 | ASSERT_RTNL(); |
3108 | block_netpoll_tx(); | ||
3033 | write_lock_bh(&bond->curr_slave_lock); | 3109 | write_lock_bh(&bond->curr_slave_lock); |
3034 | bond_select_active_slave(bond); | 3110 | bond_select_active_slave(bond); |
3035 | write_unlock_bh(&bond->curr_slave_lock); | 3111 | write_unlock_bh(&bond->curr_slave_lock); |
3112 | unblock_netpoll_tx(); | ||
3036 | } | 3113 | } |
3037 | 3114 | ||
3038 | bond_set_carrier(bond); | 3115 | bond_set_carrier(bond); |
@@ -3111,6 +3188,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3111 | { | 3188 | { |
3112 | struct bonding *bond = container_of(work, struct bonding, | 3189 | struct bonding *bond = container_of(work, struct bonding, |
3113 | arp_work.work); | 3190 | arp_work.work); |
3191 | bool should_notify_peers = false; | ||
3114 | int delta_in_ticks; | 3192 | int delta_in_ticks; |
3115 | 3193 | ||
3116 | read_lock(&bond->lock); | 3194 | read_lock(&bond->lock); |
@@ -3123,17 +3201,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3123 | if (bond->slave_cnt == 0) | 3201 | if (bond->slave_cnt == 0) |
3124 | goto re_arm; | 3202 | goto re_arm; |
3125 | 3203 | ||
3126 | if (bond->send_grat_arp) { | 3204 | should_notify_peers = bond_should_notify_peers(bond); |
3127 | read_lock(&bond->curr_slave_lock); | ||
3128 | bond_send_gratuitous_arp(bond); | ||
3129 | read_unlock(&bond->curr_slave_lock); | ||
3130 | } | ||
3131 | |||
3132 | if (bond->send_unsol_na) { | ||
3133 | read_lock(&bond->curr_slave_lock); | ||
3134 | bond_send_unsolicited_na(bond); | ||
3135 | read_unlock(&bond->curr_slave_lock); | ||
3136 | } | ||
3137 | 3205 | ||
3138 | if (bond_ab_arp_inspect(bond, delta_in_ticks)) { | 3206 | if (bond_ab_arp_inspect(bond, delta_in_ticks)) { |
3139 | read_unlock(&bond->lock); | 3207 | read_unlock(&bond->lock); |
@@ -3154,299 +3222,14 @@ re_arm: | |||
3154 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3222 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3155 | out: | 3223 | out: |
3156 | read_unlock(&bond->lock); | 3224 | read_unlock(&bond->lock); |
3157 | } | ||
3158 | |||
3159 | /*------------------------------ proc/seq_file-------------------------------*/ | ||
3160 | |||
3161 | #ifdef CONFIG_PROC_FS | ||
3162 | |||
3163 | static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) | ||
3164 | __acquires(&dev_base_lock) | ||
3165 | __acquires(&bond->lock) | ||
3166 | { | ||
3167 | struct bonding *bond = seq->private; | ||
3168 | loff_t off = 0; | ||
3169 | struct slave *slave; | ||
3170 | int i; | ||
3171 | |||
3172 | /* make sure the bond won't be taken away */ | ||
3173 | read_lock(&dev_base_lock); | ||
3174 | read_lock(&bond->lock); | ||
3175 | |||
3176 | if (*pos == 0) | ||
3177 | return SEQ_START_TOKEN; | ||
3178 | |||
3179 | bond_for_each_slave(bond, slave, i) { | ||
3180 | if (++off == *pos) | ||
3181 | return slave; | ||
3182 | } | ||
3183 | |||
3184 | return NULL; | ||
3185 | } | ||
3186 | |||
3187 | static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
3188 | { | ||
3189 | struct bonding *bond = seq->private; | ||
3190 | struct slave *slave = v; | ||
3191 | |||
3192 | ++*pos; | ||
3193 | if (v == SEQ_START_TOKEN) | ||
3194 | return bond->first_slave; | ||
3195 | |||
3196 | slave = slave->next; | ||
3197 | |||
3198 | return (slave == bond->first_slave) ? NULL : slave; | ||
3199 | } | ||
3200 | |||
3201 | static void bond_info_seq_stop(struct seq_file *seq, void *v) | ||
3202 | __releases(&bond->lock) | ||
3203 | __releases(&dev_base_lock) | ||
3204 | { | ||
3205 | struct bonding *bond = seq->private; | ||
3206 | |||
3207 | read_unlock(&bond->lock); | ||
3208 | read_unlock(&dev_base_lock); | ||
3209 | } | ||
3210 | |||
3211 | static void bond_info_show_master(struct seq_file *seq) | ||
3212 | { | ||
3213 | struct bonding *bond = seq->private; | ||
3214 | struct slave *curr; | ||
3215 | int i; | ||
3216 | |||
3217 | read_lock(&bond->curr_slave_lock); | ||
3218 | curr = bond->curr_active_slave; | ||
3219 | read_unlock(&bond->curr_slave_lock); | ||
3220 | |||
3221 | seq_printf(seq, "Bonding Mode: %s", | ||
3222 | bond_mode_name(bond->params.mode)); | ||
3223 | |||
3224 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && | ||
3225 | bond->params.fail_over_mac) | ||
3226 | seq_printf(seq, " (fail_over_mac %s)", | ||
3227 | fail_over_mac_tbl[bond->params.fail_over_mac].modename); | ||
3228 | |||
3229 | seq_printf(seq, "\n"); | ||
3230 | |||
3231 | if (bond->params.mode == BOND_MODE_XOR || | ||
3232 | bond->params.mode == BOND_MODE_8023AD) { | ||
3233 | seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", | ||
3234 | xmit_hashtype_tbl[bond->params.xmit_policy].modename, | ||
3235 | bond->params.xmit_policy); | ||
3236 | } | ||
3237 | |||
3238 | if (USES_PRIMARY(bond->params.mode)) { | ||
3239 | seq_printf(seq, "Primary Slave: %s", | ||
3240 | (bond->primary_slave) ? | ||
3241 | bond->primary_slave->dev->name : "None"); | ||
3242 | if (bond->primary_slave) | ||
3243 | seq_printf(seq, " (primary_reselect %s)", | ||
3244 | pri_reselect_tbl[bond->params.primary_reselect].modename); | ||
3245 | |||
3246 | seq_printf(seq, "\nCurrently Active Slave: %s\n", | ||
3247 | (curr) ? curr->dev->name : "None"); | ||
3248 | } | ||
3249 | |||
3250 | seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ? | ||
3251 | "up" : "down"); | ||
3252 | seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon); | ||
3253 | seq_printf(seq, "Up Delay (ms): %d\n", | ||
3254 | bond->params.updelay * bond->params.miimon); | ||
3255 | seq_printf(seq, "Down Delay (ms): %d\n", | ||
3256 | bond->params.downdelay * bond->params.miimon); | ||
3257 | |||
3258 | |||
3259 | /* ARP information */ | ||
3260 | if (bond->params.arp_interval > 0) { | ||
3261 | int printed = 0; | ||
3262 | seq_printf(seq, "ARP Polling Interval (ms): %d\n", | ||
3263 | bond->params.arp_interval); | ||
3264 | |||
3265 | seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); | ||
3266 | |||
3267 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | ||
3268 | if (!bond->params.arp_targets[i]) | ||
3269 | break; | ||
3270 | if (printed) | ||
3271 | seq_printf(seq, ","); | ||
3272 | seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); | ||
3273 | printed = 1; | ||
3274 | } | ||
3275 | seq_printf(seq, "\n"); | ||
3276 | } | ||
3277 | |||
3278 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
3279 | struct ad_info ad_info; | ||
3280 | |||
3281 | seq_puts(seq, "\n802.3ad info\n"); | ||
3282 | seq_printf(seq, "LACP rate: %s\n", | ||
3283 | (bond->params.lacp_fast) ? "fast" : "slow"); | ||
3284 | seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", | ||
3285 | ad_select_tbl[bond->params.ad_select].modename); | ||
3286 | |||
3287 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) { | ||
3288 | seq_printf(seq, "bond %s has no active aggregator\n", | ||
3289 | bond->dev->name); | ||
3290 | } else { | ||
3291 | seq_printf(seq, "Active Aggregator Info:\n"); | ||
3292 | |||
3293 | seq_printf(seq, "\tAggregator ID: %d\n", | ||
3294 | ad_info.aggregator_id); | ||
3295 | seq_printf(seq, "\tNumber of ports: %d\n", | ||
3296 | ad_info.ports); | ||
3297 | seq_printf(seq, "\tActor Key: %d\n", | ||
3298 | ad_info.actor_key); | ||
3299 | seq_printf(seq, "\tPartner Key: %d\n", | ||
3300 | ad_info.partner_key); | ||
3301 | seq_printf(seq, "\tPartner Mac Address: %pM\n", | ||
3302 | ad_info.partner_system); | ||
3303 | } | ||
3304 | } | ||
3305 | } | ||
3306 | |||
3307 | static void bond_info_show_slave(struct seq_file *seq, | ||
3308 | const struct slave *slave) | ||
3309 | { | ||
3310 | struct bonding *bond = seq->private; | ||
3311 | |||
3312 | seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); | ||
3313 | seq_printf(seq, "MII Status: %s\n", | ||
3314 | (slave->link == BOND_LINK_UP) ? "up" : "down"); | ||
3315 | seq_printf(seq, "Link Failure Count: %u\n", | ||
3316 | slave->link_failure_count); | ||
3317 | |||
3318 | seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); | ||
3319 | |||
3320 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
3321 | const struct aggregator *agg | ||
3322 | = SLAVE_AD_INFO(slave).port.aggregator; | ||
3323 | |||
3324 | if (agg) | ||
3325 | seq_printf(seq, "Aggregator ID: %d\n", | ||
3326 | agg->aggregator_identifier); | ||
3327 | else | ||
3328 | seq_puts(seq, "Aggregator ID: N/A\n"); | ||
3329 | } | ||
3330 | seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); | ||
3331 | } | ||
3332 | |||
3333 | static int bond_info_seq_show(struct seq_file *seq, void *v) | ||
3334 | { | ||
3335 | if (v == SEQ_START_TOKEN) { | ||
3336 | seq_printf(seq, "%s\n", version); | ||
3337 | bond_info_show_master(seq); | ||
3338 | } else | ||
3339 | bond_info_show_slave(seq, v); | ||
3340 | |||
3341 | return 0; | ||
3342 | } | ||
3343 | |||
3344 | static const struct seq_operations bond_info_seq_ops = { | ||
3345 | .start = bond_info_seq_start, | ||
3346 | .next = bond_info_seq_next, | ||
3347 | .stop = bond_info_seq_stop, | ||
3348 | .show = bond_info_seq_show, | ||
3349 | }; | ||
3350 | |||
3351 | static int bond_info_open(struct inode *inode, struct file *file) | ||
3352 | { | ||
3353 | struct seq_file *seq; | ||
3354 | struct proc_dir_entry *proc; | ||
3355 | int res; | ||
3356 | |||
3357 | res = seq_open(file, &bond_info_seq_ops); | ||
3358 | if (!res) { | ||
3359 | /* recover the pointer buried in proc_dir_entry data */ | ||
3360 | seq = file->private_data; | ||
3361 | proc = PDE(inode); | ||
3362 | seq->private = proc->data; | ||
3363 | } | ||
3364 | |||
3365 | return res; | ||
3366 | } | ||
3367 | |||
3368 | static const struct file_operations bond_info_fops = { | ||
3369 | .owner = THIS_MODULE, | ||
3370 | .open = bond_info_open, | ||
3371 | .read = seq_read, | ||
3372 | .llseek = seq_lseek, | ||
3373 | .release = seq_release, | ||
3374 | }; | ||
3375 | |||
3376 | static void bond_create_proc_entry(struct bonding *bond) | ||
3377 | { | ||
3378 | struct net_device *bond_dev = bond->dev; | ||
3379 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | ||
3380 | |||
3381 | if (bn->proc_dir) { | ||
3382 | bond->proc_entry = proc_create_data(bond_dev->name, | ||
3383 | S_IRUGO, bn->proc_dir, | ||
3384 | &bond_info_fops, bond); | ||
3385 | if (bond->proc_entry == NULL) | ||
3386 | pr_warning("Warning: Cannot create /proc/net/%s/%s\n", | ||
3387 | DRV_NAME, bond_dev->name); | ||
3388 | else | ||
3389 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); | ||
3390 | } | ||
3391 | } | ||
3392 | |||
3393 | static void bond_remove_proc_entry(struct bonding *bond) | ||
3394 | { | ||
3395 | struct net_device *bond_dev = bond->dev; | ||
3396 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | ||
3397 | |||
3398 | if (bn->proc_dir && bond->proc_entry) { | ||
3399 | remove_proc_entry(bond->proc_file_name, bn->proc_dir); | ||
3400 | memset(bond->proc_file_name, 0, IFNAMSIZ); | ||
3401 | bond->proc_entry = NULL; | ||
3402 | } | ||
3403 | } | ||
3404 | 3225 | ||
3405 | /* Create the bonding directory under /proc/net, if doesn't exist yet. | 3226 | if (should_notify_peers) { |
3406 | * Caller must hold rtnl_lock. | 3227 | rtnl_lock(); |
3407 | */ | 3228 | netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); |
3408 | static void __net_init bond_create_proc_dir(struct bond_net *bn) | 3229 | rtnl_unlock(); |
3409 | { | ||
3410 | if (!bn->proc_dir) { | ||
3411 | bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); | ||
3412 | if (!bn->proc_dir) | ||
3413 | pr_warning("Warning: cannot create /proc/net/%s\n", | ||
3414 | DRV_NAME); | ||
3415 | } | ||
3416 | } | ||
3417 | |||
3418 | /* Destroy the bonding directory under /proc/net, if empty. | ||
3419 | * Caller must hold rtnl_lock. | ||
3420 | */ | ||
3421 | static void __net_exit bond_destroy_proc_dir(struct bond_net *bn) | ||
3422 | { | ||
3423 | if (bn->proc_dir) { | ||
3424 | remove_proc_entry(DRV_NAME, bn->net->proc_net); | ||
3425 | bn->proc_dir = NULL; | ||
3426 | } | 3230 | } |
3427 | } | 3231 | } |
3428 | 3232 | ||
3429 | #else /* !CONFIG_PROC_FS */ | ||
3430 | |||
3431 | static void bond_create_proc_entry(struct bonding *bond) | ||
3432 | { | ||
3433 | } | ||
3434 | |||
3435 | static void bond_remove_proc_entry(struct bonding *bond) | ||
3436 | { | ||
3437 | } | ||
3438 | |||
3439 | static inline void bond_create_proc_dir(struct bond_net *bn) | ||
3440 | { | ||
3441 | } | ||
3442 | |||
3443 | static inline void bond_destroy_proc_dir(struct bond_net *bn) | ||
3444 | { | ||
3445 | } | ||
3446 | |||
3447 | #endif /* CONFIG_PROC_FS */ | ||
3448 | |||
3449 | |||
3450 | /*-------------------------- netdev event handling --------------------------*/ | 3233 | /*-------------------------- netdev event handling --------------------------*/ |
3451 | 3234 | ||
3452 | /* | 3235 | /* |
@@ -3457,6 +3240,8 @@ static int bond_event_changename(struct bonding *bond) | |||
3457 | bond_remove_proc_entry(bond); | 3240 | bond_remove_proc_entry(bond); |
3458 | bond_create_proc_entry(bond); | 3241 | bond_create_proc_entry(bond); |
3459 | 3242 | ||
3243 | bond_debug_reregister(bond); | ||
3244 | |||
3460 | return NOTIFY_DONE; | 3245 | return NOTIFY_DONE; |
3461 | } | 3246 | } |
3462 | 3247 | ||
@@ -3496,8 +3281,8 @@ static int bond_slave_netdev_event(unsigned long event, | |||
3496 | 3281 | ||
3497 | slave = bond_get_slave_by_dev(bond, slave_dev); | 3282 | slave = bond_get_slave_by_dev(bond, slave_dev); |
3498 | if (slave) { | 3283 | if (slave) { |
3499 | u16 old_speed = slave->speed; | 3284 | u32 old_speed = slave->speed; |
3500 | u16 old_duplex = slave->duplex; | 3285 | u8 old_duplex = slave->duplex; |
3501 | 3286 | ||
3502 | bond_update_speed_duplex(slave); | 3287 | bond_update_speed_duplex(slave); |
3503 | 3288 | ||
@@ -3639,48 +3424,6 @@ static struct notifier_block bond_inetaddr_notifier = { | |||
3639 | .notifier_call = bond_inetaddr_event, | 3424 | .notifier_call = bond_inetaddr_event, |
3640 | }; | 3425 | }; |
3641 | 3426 | ||
3642 | /*-------------------------- Packet type handling ---------------------------*/ | ||
3643 | |||
3644 | /* register to receive lacpdus on a bond */ | ||
3645 | static void bond_register_lacpdu(struct bonding *bond) | ||
3646 | { | ||
3647 | struct packet_type *pk_type = &(BOND_AD_INFO(bond).ad_pkt_type); | ||
3648 | |||
3649 | /* initialize packet type */ | ||
3650 | pk_type->type = PKT_TYPE_LACPDU; | ||
3651 | pk_type->dev = bond->dev; | ||
3652 | pk_type->func = bond_3ad_lacpdu_recv; | ||
3653 | |||
3654 | dev_add_pack(pk_type); | ||
3655 | } | ||
3656 | |||
3657 | /* unregister to receive lacpdus on a bond */ | ||
3658 | static void bond_unregister_lacpdu(struct bonding *bond) | ||
3659 | { | ||
3660 | dev_remove_pack(&(BOND_AD_INFO(bond).ad_pkt_type)); | ||
3661 | } | ||
3662 | |||
3663 | void bond_register_arp(struct bonding *bond) | ||
3664 | { | ||
3665 | struct packet_type *pt = &bond->arp_mon_pt; | ||
3666 | |||
3667 | if (pt->type) | ||
3668 | return; | ||
3669 | |||
3670 | pt->type = htons(ETH_P_ARP); | ||
3671 | pt->dev = bond->dev; | ||
3672 | pt->func = bond_arp_rcv; | ||
3673 | dev_add_pack(pt); | ||
3674 | } | ||
3675 | |||
3676 | void bond_unregister_arp(struct bonding *bond) | ||
3677 | { | ||
3678 | struct packet_type *pt = &bond->arp_mon_pt; | ||
3679 | |||
3680 | dev_remove_pack(pt); | ||
3681 | pt->type = 0; | ||
3682 | } | ||
3683 | |||
3684 | /*---------------------------- Hashing Policies -----------------------------*/ | 3427 | /*---------------------------- Hashing Policies -----------------------------*/ |
3685 | 3428 | ||
3686 | /* | 3429 | /* |
@@ -3744,6 +3487,8 @@ static int bond_open(struct net_device *bond_dev) | |||
3744 | 3487 | ||
3745 | bond->kill_timers = 0; | 3488 | bond->kill_timers = 0; |
3746 | 3489 | ||
3490 | INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); | ||
3491 | |||
3747 | if (bond_is_lb(bond)) { | 3492 | if (bond_is_lb(bond)) { |
3748 | /* bond_alb_initialize must be called before the timer | 3493 | /* bond_alb_initialize must be called before the timer |
3749 | * is started. | 3494 | * is started. |
@@ -3772,14 +3517,14 @@ static int bond_open(struct net_device *bond_dev) | |||
3772 | 3517 | ||
3773 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | 3518 | queue_delayed_work(bond->wq, &bond->arp_work, 0); |
3774 | if (bond->params.arp_validate) | 3519 | if (bond->params.arp_validate) |
3775 | bond_register_arp(bond); | 3520 | bond->recv_probe = bond_arp_rcv; |
3776 | } | 3521 | } |
3777 | 3522 | ||
3778 | if (bond->params.mode == BOND_MODE_8023AD) { | 3523 | if (bond->params.mode == BOND_MODE_8023AD) { |
3779 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); | 3524 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); |
3780 | queue_delayed_work(bond->wq, &bond->ad_work, 0); | 3525 | queue_delayed_work(bond->wq, &bond->ad_work, 0); |
3781 | /* register to receive LACPDUs */ | 3526 | /* register to receive LACPDUs */ |
3782 | bond_register_lacpdu(bond); | 3527 | bond->recv_probe = bond_3ad_lacpdu_recv; |
3783 | bond_3ad_initiate_agg_selection(bond, 1); | 3528 | bond_3ad_initiate_agg_selection(bond, 1); |
3784 | } | 3529 | } |
3785 | 3530 | ||
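With dev_add_pack() gone, bond_open() simply points bond->recv_probe at the ARP or LACPDU parser; frame delivery itself comes through the bond's rx_handler rather than a separate packet_type. A rough sketch of how such a hook is typically consulted on receive (illustrative only, not a copy of bond_handle_frame()):

static void demo_deliver_probe(struct bonding *bond, struct slave *slave,
			       struct sk_buff *skb)
{
	void (*recv_probe)(struct sk_buff *, struct bonding *, struct slave *);

	recv_probe = ACCESS_ONCE(bond->recv_probe);	/* may be cleared in bond_close() */
	if (recv_probe) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (nskb) {
			recv_probe(nskb, bond, slave);
			dev_kfree_skb(nskb);
		}
	}
}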
@@ -3790,18 +3535,9 @@ static int bond_close(struct net_device *bond_dev) | |||
3790 | { | 3535 | { |
3791 | struct bonding *bond = netdev_priv(bond_dev); | 3536 | struct bonding *bond = netdev_priv(bond_dev); |
3792 | 3537 | ||
3793 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
3794 | /* Unregister the receive of LACPDUs */ | ||
3795 | bond_unregister_lacpdu(bond); | ||
3796 | } | ||
3797 | |||
3798 | if (bond->params.arp_validate) | ||
3799 | bond_unregister_arp(bond); | ||
3800 | |||
3801 | write_lock_bh(&bond->lock); | 3538 | write_lock_bh(&bond->lock); |
3802 | 3539 | ||
3803 | bond->send_grat_arp = 0; | 3540 | bond->send_peer_notif = 0; |
3804 | bond->send_unsol_na = 0; | ||
3805 | 3541 | ||
3806 | /* signal timers not to re-arm */ | 3542 | /* signal timers not to re-arm */ |
3807 | bond->kill_timers = 1; | 3543 | bond->kill_timers = 1; |
@@ -3828,6 +3564,8 @@ static int bond_close(struct net_device *bond_dev) | |||
3828 | break; | 3564 | break; |
3829 | } | 3565 | } |
3830 | 3566 | ||
3567 | if (delayed_work_pending(&bond->mcast_work)) | ||
3568 | cancel_delayed_work(&bond->mcast_work); | ||
3831 | 3569 | ||
3832 | if (bond_is_lb(bond)) { | 3570 | if (bond_is_lb(bond)) { |
3833 | /* Must be called only after all | 3571 | /* Must be called only after all |
@@ -3835,6 +3573,7 @@ static int bond_close(struct net_device *bond_dev) | |||
3835 | */ | 3573 | */ |
3836 | bond_alb_deinitialize(bond); | 3574 | bond_alb_deinitialize(bond); |
3837 | } | 3575 | } |
3576 | bond->recv_probe = NULL; | ||
3838 | 3577 | ||
3839 | return 0; | 3578 | return 0; |
3840 | } | 3579 | } |
@@ -4258,10 +3997,6 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4258 | int i, slave_no, res = 1; | 3997 | int i, slave_no, res = 1; |
4259 | struct iphdr *iph = ip_hdr(skb); | 3998 | struct iphdr *iph = ip_hdr(skb); |
4260 | 3999 | ||
4261 | read_lock(&bond->lock); | ||
4262 | |||
4263 | if (!BOND_IS_OK(bond)) | ||
4264 | goto out; | ||
4265 | /* | 4000 | /* |
4266 | * Start with the curr_active_slave that joined the bond as the | 4001 | * Start with the curr_active_slave that joined the bond as the |
4267 | * default for sending IGMP traffic. For failover purposes one | 4002 | * default for sending IGMP traffic. For failover purposes one |
@@ -4297,7 +4032,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4297 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4032 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4298 | if (IS_UP(slave->dev) && | 4033 | if (IS_UP(slave->dev) && |
4299 | (slave->link == BOND_LINK_UP) && | 4034 | (slave->link == BOND_LINK_UP) && |
4300 | (slave->state == BOND_STATE_ACTIVE)) { | 4035 | bond_is_active_slave(slave)) { |
4301 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4036 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4302 | break; | 4037 | break; |
4303 | } | 4038 | } |
@@ -4308,7 +4043,7 @@ out: | |||
4308 | /* no suitable interface, frame not sent */ | 4043 | /* no suitable interface, frame not sent */ |
4309 | dev_kfree_skb(skb); | 4044 | dev_kfree_skb(skb); |
4310 | } | 4045 | } |
4311 | read_unlock(&bond->lock); | 4046 | |
4312 | return NETDEV_TX_OK; | 4047 | return NETDEV_TX_OK; |
4313 | } | 4048 | } |
4314 | 4049 | ||
@@ -4322,24 +4057,18 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
4322 | struct bonding *bond = netdev_priv(bond_dev); | 4057 | struct bonding *bond = netdev_priv(bond_dev); |
4323 | int res = 1; | 4058 | int res = 1; |
4324 | 4059 | ||
4325 | read_lock(&bond->lock); | ||
4326 | read_lock(&bond->curr_slave_lock); | 4060 | read_lock(&bond->curr_slave_lock); |
4327 | 4061 | ||
4328 | if (!BOND_IS_OK(bond)) | 4062 | if (bond->curr_active_slave) |
4329 | goto out; | 4063 | res = bond_dev_queue_xmit(bond, skb, |
4330 | 4064 | bond->curr_active_slave->dev); | |
4331 | if (!bond->curr_active_slave) | ||
4332 | goto out; | ||
4333 | |||
4334 | res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); | ||
4335 | 4065 | ||
4336 | out: | ||
4337 | if (res) | 4066 | if (res) |
4338 | /* no suitable interface, frame not sent */ | 4067 | /* no suitable interface, frame not sent */ |
4339 | dev_kfree_skb(skb); | 4068 | dev_kfree_skb(skb); |
4340 | 4069 | ||
4341 | read_unlock(&bond->curr_slave_lock); | 4070 | read_unlock(&bond->curr_slave_lock); |
4342 | read_unlock(&bond->lock); | 4071 | |
4343 | return NETDEV_TX_OK; | 4072 | return NETDEV_TX_OK; |
4344 | } | 4073 | } |
4345 | 4074 | ||
@@ -4356,11 +4085,6 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | |||
4356 | int i; | 4085 | int i; |
4357 | int res = 1; | 4086 | int res = 1; |
4358 | 4087 | ||
4359 | read_lock(&bond->lock); | ||
4360 | |||
4361 | if (!BOND_IS_OK(bond)) | ||
4362 | goto out; | ||
4363 | |||
4364 | slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); | 4088 | slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); |
4365 | 4089 | ||
4366 | bond_for_each_slave(bond, slave, i) { | 4090 | bond_for_each_slave(bond, slave, i) { |
@@ -4374,18 +4098,17 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | |||
4374 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4098 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4375 | if (IS_UP(slave->dev) && | 4099 | if (IS_UP(slave->dev) && |
4376 | (slave->link == BOND_LINK_UP) && | 4100 | (slave->link == BOND_LINK_UP) && |
4377 | (slave->state == BOND_STATE_ACTIVE)) { | 4101 | bond_is_active_slave(slave)) { |
4378 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4102 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4379 | break; | 4103 | break; |
4380 | } | 4104 | } |
4381 | } | 4105 | } |
4382 | 4106 | ||
4383 | out: | ||
4384 | if (res) { | 4107 | if (res) { |
4385 | /* no suitable interface, frame not sent */ | 4108 | /* no suitable interface, frame not sent */ |
4386 | dev_kfree_skb(skb); | 4109 | dev_kfree_skb(skb); |
4387 | } | 4110 | } |
4388 | read_unlock(&bond->lock); | 4111 | |
4389 | return NETDEV_TX_OK; | 4112 | return NETDEV_TX_OK; |
4390 | } | 4113 | } |
4391 | 4114 | ||
@@ -4400,11 +4123,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) | |||
4400 | int i; | 4123 | int i; |
4401 | int res = 1; | 4124 | int res = 1; |
4402 | 4125 | ||
4403 | read_lock(&bond->lock); | ||
4404 | |||
4405 | if (!BOND_IS_OK(bond)) | ||
4406 | goto out; | ||
4407 | |||
4408 | read_lock(&bond->curr_slave_lock); | 4126 | read_lock(&bond->curr_slave_lock); |
4409 | start_at = bond->curr_active_slave; | 4127 | start_at = bond->curr_active_slave; |
4410 | read_unlock(&bond->curr_slave_lock); | 4128 | read_unlock(&bond->curr_slave_lock); |
@@ -4415,7 +4133,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) | |||
4415 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4133 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4416 | if (IS_UP(slave->dev) && | 4134 | if (IS_UP(slave->dev) && |
4417 | (slave->link == BOND_LINK_UP) && | 4135 | (slave->link == BOND_LINK_UP) && |
4418 | (slave->state == BOND_STATE_ACTIVE)) { | 4136 | bond_is_active_slave(slave)) { |
4419 | if (tx_dev) { | 4137 | if (tx_dev) { |
4420 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 4138 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
4421 | if (!skb2) { | 4139 | if (!skb2) { |
@@ -4443,7 +4161,6 @@ out: | |||
4443 | dev_kfree_skb(skb); | 4161 | dev_kfree_skb(skb); |
4444 | 4162 | ||
4445 | /* frame sent to all suitable interfaces */ | 4163 | /* frame sent to all suitable interfaces */ |
4446 | read_unlock(&bond->lock); | ||
4447 | return NETDEV_TX_OK; | 4164 | return NETDEV_TX_OK; |
4448 | } | 4165 | } |
4449 | 4166 | ||
@@ -4475,10 +4192,8 @@ static inline int bond_slave_override(struct bonding *bond, | |||
4475 | struct slave *slave = NULL; | 4192 | struct slave *slave = NULL; |
4476 | struct slave *check_slave; | 4193 | struct slave *check_slave; |
4477 | 4194 | ||
4478 | read_lock(&bond->lock); | 4195 | if (!skb->queue_mapping) |
4479 | 4196 | return 1; | |
4480 | if (!BOND_IS_OK(bond) || !skb->queue_mapping) | ||
4481 | goto out; | ||
4482 | 4197 | ||
4483 | /* Find out if any slaves have the same mapping as this skb. */ | 4198 | /* Find out if any slaves have the same mapping as this skb. */ |
4484 | bond_for_each_slave(bond, check_slave, i) { | 4199 | bond_for_each_slave(bond, check_slave, i) { |
@@ -4494,23 +4209,34 @@ static inline int bond_slave_override(struct bonding *bond, | |||
4494 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4209 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4495 | } | 4210 | } |
4496 | 4211 | ||
4497 | out: | ||
4498 | read_unlock(&bond->lock); | ||
4499 | return res; | 4212 | return res; |
4500 | } | 4213 | } |
4501 | 4214 | ||
4215 | |||
4502 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | 4216 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) |
4503 | { | 4217 | { |
4504 | /* | 4218 | /* |
4505 | * This helper function exists to help dev_pick_tx get the correct | 4219 | * This helper function exists to help dev_pick_tx get the correct |
4506 | * destination queue. Using a helper function skips the a call to | 4220 | * destination queue. Using a helper function skips a call to |
4507 | * skb_tx_hash and will put the skbs in the queue we expect on their | 4221 | * skb_tx_hash and will put the skbs in the queue we expect on their |
4508 | * way down to the bonding driver. | 4222 | * way down to the bonding driver. |
4509 | */ | 4223 | */ |
4510 | return skb->queue_mapping; | 4224 | u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; |
4225 | |||
4226 | /* | ||
4227 | * Save the original txq to restore before passing to the driver | ||
4228 | */ | ||
4229 | bond_queue_mapping(skb) = skb->queue_mapping; | ||
4230 | |||
4231 | if (unlikely(txq >= dev->real_num_tx_queues)) { | ||
4232 | do { | ||
4233 | txq -= dev->real_num_tx_queues; | ||
4234 | } while (txq >= dev->real_num_tx_queues); | ||
4235 | } | ||
4236 | return txq; | ||
4511 | } | 4237 | } |
4512 | 4238 | ||
4513 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | 4239 | static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) |
4514 | { | 4240 | { |
4515 | struct bonding *bond = netdev_priv(dev); | 4241 | struct bonding *bond = netdev_priv(dev); |
4516 | 4242 | ||
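bond_select_queue() now honours a recorded rx queue and folds any out-of-range value into the bond's real tx queue count, stashing the caller-visible mapping via bond_queue_mapping() (defined elsewhere in the driver) so it can be restored before the skb reaches the slave. A small worked example of the fold, assuming four real tx queues:

/* e.g. with real_num_tx_queues == 4: txq 5 -> 1, txq 9 -> 1, txq 3 -> 3 */
static u16 demo_fold_txq(u16 txq, unsigned int real_num_tx_queues)
{
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;
	return txq;
}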
@@ -4543,6 +4269,29 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4543 | } | 4269 | } |
4544 | } | 4270 | } |
4545 | 4271 | ||
4272 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
4273 | { | ||
4274 | struct bonding *bond = netdev_priv(dev); | ||
4275 | netdev_tx_t ret = NETDEV_TX_OK; | ||
4276 | |||
4277 | /* | ||
4278 | * If we risk deadlock from transmitting this in the | ||
4279 | * netpoll path, tell netpoll to queue the frame for later tx | ||
4280 | */ | ||
4281 | if (is_netpoll_tx_blocked(dev)) | ||
4282 | return NETDEV_TX_BUSY; | ||
4283 | |||
4284 | read_lock(&bond->lock); | ||
4285 | |||
4286 | if (bond->slave_cnt) | ||
4287 | ret = __bond_start_xmit(skb, dev); | ||
4288 | else | ||
4289 | dev_kfree_skb(skb); | ||
4290 | |||
4291 | read_unlock(&bond->lock); | ||
4292 | |||
4293 | return ret; | ||
4294 | } | ||
4546 | 4295 | ||
4547 | /* | 4296 | /* |
4548 | * set bond mode specific net device operations | 4297 | * set bond mode specific net device operations |
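The new bond_start_xmit() wrapper pairs with the block_netpoll_tx()/unblock_netpoll_tx() calls added around the failover sections above: while a failover holds the curr_slave locks, a netpoll transmit attempt gets NETDEV_TX_BUSY back and is queued for later instead of spinning on locks the failover path already owns. The real helpers live in bonding.h; the sketch below is only a hypothetical shape for them, assuming a simple global counter and the netpoll_tx_running() test from linux/netpoll.h:

static atomic_t demo_netpoll_block_tx = ATOMIC_INIT(0);

static inline void demo_block_netpoll_tx(void)
{
	atomic_inc(&demo_netpoll_block_tx);
}

static inline void demo_unblock_netpoll_tx(void)
{
	atomic_dec(&demo_netpoll_block_tx);
}

static inline int demo_is_netpoll_tx_blocked(struct net_device *dev)
{
	/* only matters while netpoll itself is transmitting via this device */
	if (unlikely(netpoll_tx_running(dev)))
		return atomic_read(&demo_netpoll_block_tx);
	return 0;
}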
@@ -4562,11 +4311,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode) | |||
4562 | case BOND_MODE_BROADCAST: | 4311 | case BOND_MODE_BROADCAST: |
4563 | break; | 4312 | break; |
4564 | case BOND_MODE_8023AD: | 4313 | case BOND_MODE_8023AD: |
4565 | bond_set_master_3ad_flags(bond); | ||
4566 | bond_set_xmit_hash_policy(bond); | 4314 | bond_set_xmit_hash_policy(bond); |
4567 | break; | 4315 | break; |
4568 | case BOND_MODE_ALB: | 4316 | case BOND_MODE_ALB: |
4569 | bond_set_master_alb_flags(bond); | ||
4570 | /* FALLTHRU */ | 4317 | /* FALLTHRU */ |
4571 | case BOND_MODE_TLB: | 4318 | case BOND_MODE_TLB: |
4572 | break; | 4319 | break; |
@@ -4589,11 +4336,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, | |||
4589 | static const struct ethtool_ops bond_ethtool_ops = { | 4336 | static const struct ethtool_ops bond_ethtool_ops = { |
4590 | .get_drvinfo = bond_ethtool_get_drvinfo, | 4337 | .get_drvinfo = bond_ethtool_get_drvinfo, |
4591 | .get_link = ethtool_op_get_link, | 4338 | .get_link = ethtool_op_get_link, |
4592 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
4593 | .get_sg = ethtool_op_get_sg, | ||
4594 | .get_tso = ethtool_op_get_tso, | ||
4595 | .get_ufo = ethtool_op_get_ufo, | ||
4596 | .get_flags = ethtool_op_get_flags, | ||
4597 | }; | 4339 | }; |
4598 | 4340 | ||
4599 | static const struct net_device_ops bond_netdev_ops = { | 4341 | static const struct net_device_ops bond_netdev_ops = { |
@@ -4613,9 +4355,13 @@ static const struct net_device_ops bond_netdev_ops = { | |||
4613 | .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, | 4355 | .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, |
4614 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, | 4356 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, |
4615 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4357 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4358 | .ndo_netpoll_setup = bond_netpoll_setup, | ||
4616 | .ndo_netpoll_cleanup = bond_netpoll_cleanup, | 4359 | .ndo_netpoll_cleanup = bond_netpoll_cleanup, |
4617 | .ndo_poll_controller = bond_poll_controller, | 4360 | .ndo_poll_controller = bond_poll_controller, |
4618 | #endif | 4361 | #endif |
4362 | .ndo_add_slave = bond_enslave, | ||
4363 | .ndo_del_slave = bond_release, | ||
4364 | .ndo_fix_features = bond_fix_features, | ||
4619 | }; | 4365 | }; |
4620 | 4366 | ||
4621 | static void bond_destructor(struct net_device *bond_dev) | 4367 | static void bond_destructor(struct net_device *bond_dev) |
@@ -4654,9 +4400,6 @@ static void bond_setup(struct net_device *bond_dev) | |||
4654 | bond_dev->priv_flags |= IFF_BONDING; | 4400 | bond_dev->priv_flags |= IFF_BONDING; |
4655 | bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 4401 | bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
4656 | 4402 | ||
4657 | if (bond->params.arp_interval) | ||
4658 | bond_dev->priv_flags |= IFF_MASTER_ARPMON; | ||
4659 | |||
4660 | /* At first, we block adding VLANs. That's the only way to | 4403 | /* At first, we block adding VLANs. That's the only way to |
4661 | * prevent problems that occur when adding VLANs over an | 4404 | * prevent problems that occur when adding VLANs over an |
4662 | * empty bond. The block will be removed once non-challenged | 4405 | * empty bond. The block will be removed once non-challenged |
@@ -4674,10 +4417,14 @@ static void bond_setup(struct net_device *bond_dev) | |||
4674 | * when there are slaves that are not hw accel | 4417 | * when there are slaves that are not hw accel |
4675 | * capable | 4418 | * capable |
4676 | */ | 4419 | */ |
4677 | bond_dev->features |= (NETIF_F_HW_VLAN_TX | | ||
4678 | NETIF_F_HW_VLAN_RX | | ||
4679 | NETIF_F_HW_VLAN_FILTER); | ||
4680 | 4420 | ||
4421 | bond_dev->hw_features = BOND_VLAN_FEATURES | | ||
4422 | NETIF_F_HW_VLAN_TX | | ||
4423 | NETIF_F_HW_VLAN_RX | | ||
4424 | NETIF_F_HW_VLAN_FILTER; | ||
4425 | |||
4426 | bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM); | ||
4427 | bond_dev->features |= bond_dev->hw_features; | ||
4681 | } | 4428 | } |
4682 | 4429 | ||
4683 | static void bond_work_cancel_all(struct bonding *bond) | 4430 | static void bond_work_cancel_all(struct bonding *bond) |
@@ -4699,6 +4446,9 @@ static void bond_work_cancel_all(struct bonding *bond) | |||
4699 | if (bond->params.mode == BOND_MODE_8023AD && | 4446 | if (bond->params.mode == BOND_MODE_8023AD && |
4700 | delayed_work_pending(&bond->ad_work)) | 4447 | delayed_work_pending(&bond->ad_work)) |
4701 | cancel_delayed_work(&bond->ad_work); | 4448 | cancel_delayed_work(&bond->ad_work); |
4449 | |||
4450 | if (delayed_work_pending(&bond->mcast_work)) | ||
4451 | cancel_delayed_work(&bond->mcast_work); | ||
4702 | } | 4452 | } |
4703 | 4453 | ||
4704 | /* | 4454 | /* |
@@ -4721,6 +4471,8 @@ static void bond_uninit(struct net_device *bond_dev) | |||
4721 | 4471 | ||
4722 | bond_remove_proc_entry(bond); | 4472 | bond_remove_proc_entry(bond); |
4723 | 4473 | ||
4474 | bond_debug_unregister(bond); | ||
4475 | |||
4724 | __hw_addr_flush(&bond->mc_list); | 4476 | __hw_addr_flush(&bond->mc_list); |
4725 | 4477 | ||
4726 | list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { | 4478 | list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { |
@@ -4856,16 +4608,10 @@ static int bond_check_params(struct bond_params *params) | |||
4856 | use_carrier = 1; | 4608 | use_carrier = 1; |
4857 | } | 4609 | } |
4858 | 4610 | ||
4859 | if (num_grat_arp < 0 || num_grat_arp > 255) { | 4611 | if (num_peer_notif < 0 || num_peer_notif > 255) { |
4860 | pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n", | 4612 | pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", |
4861 | num_grat_arp); | 4613 | num_peer_notif); |
4862 | num_grat_arp = 1; | 4614 | num_peer_notif = 1; |
4863 | } | ||
4864 | |||
4865 | if (num_unsol_na < 0 || num_unsol_na > 255) { | ||
4866 | pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", | ||
4867 | num_unsol_na); | ||
4868 | num_unsol_na = 1; | ||
4869 | } | 4615 | } |
4870 | 4616 | ||
4871 | /* reset values for 802.3ad */ | 4617 | /* reset values for 802.3ad */ |
@@ -4891,6 +4637,13 @@ static int bond_check_params(struct bond_params *params) | |||
4891 | all_slaves_active = 0; | 4637 | all_slaves_active = 0; |
4892 | } | 4638 | } |
4893 | 4639 | ||
4640 | if (resend_igmp < 0 || resend_igmp > 255) { | ||
4641 | pr_warning("Warning: resend_igmp (%d) should be between " | ||
4642 | "0 and 255, resetting to %d\n", | ||
4643 | resend_igmp, BOND_DEFAULT_RESEND_IGMP); | ||
4644 | resend_igmp = BOND_DEFAULT_RESEND_IGMP; | ||
4645 | } | ||
4646 | |||
4894 | /* reset values for TLB/ALB */ | 4647 | /* reset values for TLB/ALB */ |
4895 | if ((bond_mode == BOND_MODE_TLB) || | 4648 | if ((bond_mode == BOND_MODE_TLB) || |
4896 | (bond_mode == BOND_MODE_ALB)) { | 4649 | (bond_mode == BOND_MODE_ALB)) { |
@@ -5050,8 +4803,7 @@ static int bond_check_params(struct bond_params *params) | |||
5050 | params->mode = bond_mode; | 4803 | params->mode = bond_mode; |
5051 | params->xmit_policy = xmit_hashtype; | 4804 | params->xmit_policy = xmit_hashtype; |
5052 | params->miimon = miimon; | 4805 | params->miimon = miimon; |
5053 | params->num_grat_arp = num_grat_arp; | 4806 | params->num_peer_notif = num_peer_notif; |
5054 | params->num_unsol_na = num_unsol_na; | ||
5055 | params->arp_interval = arp_interval; | 4807 | params->arp_interval = arp_interval; |
5056 | params->arp_validate = arp_validate_value; | 4808 | params->arp_validate = arp_validate_value; |
5057 | params->updelay = updelay; | 4809 | params->updelay = updelay; |
@@ -5063,6 +4815,7 @@ static int bond_check_params(struct bond_params *params) | |||
5063 | params->fail_over_mac = fail_over_mac_value; | 4815 | params->fail_over_mac = fail_over_mac_value; |
5064 | params->tx_queues = tx_queues; | 4816 | params->tx_queues = tx_queues; |
5065 | params->all_slaves_active = all_slaves_active; | 4817 | params->all_slaves_active = all_slaves_active; |
4818 | params->resend_igmp = resend_igmp; | ||
5066 | 4819 | ||
5067 | if (primary) { | 4820 | if (primary) { |
5068 | strncpy(params->primary, primary, IFNAMSIZ); | 4821 | strncpy(params->primary, primary, IFNAMSIZ); |
@@ -5099,22 +4852,32 @@ static int bond_init(struct net_device *bond_dev) | |||
5099 | { | 4852 | { |
5100 | struct bonding *bond = netdev_priv(bond_dev); | 4853 | struct bonding *bond = netdev_priv(bond_dev); |
5101 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | 4854 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); |
4855 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | ||
5102 | 4856 | ||
5103 | pr_debug("Begin bond_init for %s\n", bond_dev->name); | 4857 | pr_debug("Begin bond_init for %s\n", bond_dev->name); |
5104 | 4858 | ||
4859 | /* | ||
4860 | * Initialize locks that may be required during | ||
4861 | * en/deslave operations. All of the bond_open work | ||
4862 | * (of which this is part) should really be moved to | ||
4863 | * a phase prior to dev_open | ||
4864 | */ | ||
4865 | spin_lock_init(&(bond_info->tx_hashtbl_lock)); | ||
4866 | spin_lock_init(&(bond_info->rx_hashtbl_lock)); | ||
4867 | |||
5105 | bond->wq = create_singlethread_workqueue(bond_dev->name); | 4868 | bond->wq = create_singlethread_workqueue(bond_dev->name); |
5106 | if (!bond->wq) | 4869 | if (!bond->wq) |
5107 | return -ENOMEM; | 4870 | return -ENOMEM; |
5108 | 4871 | ||
5109 | bond_set_lockdep_class(bond_dev); | 4872 | bond_set_lockdep_class(bond_dev); |
5110 | 4873 | ||
5111 | netif_carrier_off(bond_dev); | ||
5112 | |||
5113 | bond_create_proc_entry(bond); | 4874 | bond_create_proc_entry(bond); |
5114 | list_add_tail(&bond->bond_list, &bn->dev_list); | 4875 | list_add_tail(&bond->bond_list, &bn->dev_list); |
5115 | 4876 | ||
5116 | bond_prepare_sysfs_group(bond); | 4877 | bond_prepare_sysfs_group(bond); |
5117 | 4878 | ||
4879 | bond_debug_register(bond); | ||
4880 | |||
5118 | __hw_addr_init(&bond->mc_list); | 4881 | __hw_addr_init(&bond->mc_list); |
5119 | return 0; | 4882 | return 0; |
5120 | } | 4883 | } |
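The comment and spin_lock_init() calls added above make the point that the ALB hash-table locks must exist before any path that might take them while enslaving, so bond_init() now sets them up ahead of the workqueue, proc, sysfs and debugfs registration. A small userspace sketch of the same initialize-synchronization-before-use ordering (pthread mutexes stand in for the kernel spinlocks; all names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct example_bond {
	pthread_mutex_t tx_hashtbl_lock;	/* stands in for the ALB tx lock */
	pthread_mutex_t rx_hashtbl_lock;	/* stands in for the ALB rx lock */
	int registered;
};

/* Initialize locks first, so every later setup step may safely take them. */
static int example_init(struct example_bond *b)
{
	pthread_mutex_init(&b->tx_hashtbl_lock, NULL);
	pthread_mutex_init(&b->rx_hashtbl_lock, NULL);

	/* ... create workqueue, proc entry, sysfs group, debugfs here ... */
	b->registered = 1;
	return 0;
}

int main(void)
{
	struct example_bond b;

	if (example_init(&b) == 0)
		printf("init done, locks ready before first use\n");
	return 0;
}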
@@ -5149,8 +4912,9 @@ int bond_create(struct net *net, const char *name) | |||
5149 | 4912 | ||
5150 | rtnl_lock(); | 4913 | rtnl_lock(); |
5151 | 4914 | ||
5152 | bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "", | 4915 | bond_dev = alloc_netdev_mq(sizeof(struct bonding), |
5153 | bond_setup, tx_queues); | 4916 | name ? name : "bond%d", |
4917 | bond_setup, tx_queues); | ||
5154 | if (!bond_dev) { | 4918 | if (!bond_dev) { |
5155 | pr_err("%s: eek! can't alloc netdev!\n", name); | 4919 | pr_err("%s: eek! can't alloc netdev!\n", name); |
5156 | rtnl_unlock(); | 4920 | rtnl_unlock(); |
@@ -5160,24 +4924,10 @@ int bond_create(struct net *net, const char *name) | |||
5160 | dev_net_set(bond_dev, net); | 4924 | dev_net_set(bond_dev, net); |
5161 | bond_dev->rtnl_link_ops = &bond_link_ops; | 4925 | bond_dev->rtnl_link_ops = &bond_link_ops; |
5162 | 4926 | ||
5163 | if (!name) { | ||
5164 | res = dev_alloc_name(bond_dev, "bond%d"); | ||
5165 | if (res < 0) | ||
5166 | goto out; | ||
5167 | } else { | ||
5168 | /* | ||
5169 | * If we're given a name to register | ||
5170 | * we need to ensure that its not already | ||
5171 | * registered | ||
5172 | */ | ||
5173 | res = -EEXIST; | ||
5174 | if (__dev_get_by_name(net, name) != NULL) | ||
5175 | goto out; | ||
5176 | } | ||
5177 | |||
5178 | res = register_netdevice(bond_dev); | 4927 | res = register_netdevice(bond_dev); |
5179 | 4928 | ||
5180 | out: | 4929 | netif_carrier_off(bond_dev); |
4930 | |||
5181 | rtnl_unlock(); | 4931 | rtnl_unlock(); |
5182 | if (res < 0) | 4932 | if (res < 0) |
5183 | bond_destructor(bond_dev); | 4933 | bond_destructor(bond_dev); |
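bond_create() now passes the "bond%d" template straight to alloc_netdev_mq() instead of open-coding dev_alloc_name() and the __dev_get_by_name() duplicate check; register_netdevice() can resolve a %d template (or reject a clashing explicit name) on its own, which is why the removed block became redundant. A hedged userspace sketch of the template-naming idea (the table and free-slot scan below are purely illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_DEVS 4

static char registered[MAX_DEVS][16];

/* Explicit name: refuse duplicates (the kernel returns -EEXIST).
 * Template name: expand "bond%d" to the first unused index, roughly what
 * register_netdevice()/dev_alloc_name() do for the driver. */
static int pick_name(const char *requested, char *out, size_t len)
{
	int i, j;

	if (requested && !strchr(requested, '%')) {
		for (i = 0; i < MAX_DEVS; i++)
			if (!strcmp(registered[i], requested))
				return -1;
		snprintf(out, len, "%s", requested);
		return 0;
	}

	for (i = 0; i < MAX_DEVS; i++) {
		char cand[16];

		snprintf(cand, sizeof(cand), "bond%d", i);
		for (j = 0; j < MAX_DEVS; j++)
			if (!strcmp(registered[j], cand))
				break;
		if (j == MAX_DEVS) {
			snprintf(out, len, "%s", cand);
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	char name[16];

	snprintf(registered[0], sizeof(registered[0]), "bond0");
	if (pick_name("bond%d", name, sizeof(name)) == 0)
		printf("allocated %s\n", name);	/* bond0 is taken, so bond1 */
	return 0;
}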
@@ -5215,7 +4965,7 @@ static int __init bonding_init(void) | |||
5215 | int i; | 4965 | int i; |
5216 | int res; | 4966 | int res; |
5217 | 4967 | ||
5218 | pr_info("%s", version); | 4968 | pr_info("%s", bond_version); |
5219 | 4969 | ||
5220 | res = bond_check_params(&bonding_defaults); | 4970 | res = bond_check_params(&bonding_defaults); |
5221 | if (res) | 4971 | if (res) |
@@ -5229,6 +4979,8 @@ static int __init bonding_init(void) | |||
5229 | if (res) | 4979 | if (res) |
5230 | goto err_link; | 4980 | goto err_link; |
5231 | 4981 | ||
4982 | bond_create_debugfs(); | ||
4983 | |||
5232 | for (i = 0; i < max_bonds; i++) { | 4984 | for (i = 0; i < max_bonds; i++) { |
5233 | res = bond_create(&init_net, NULL); | 4985 | res = bond_create(&init_net, NULL); |
5234 | if (res) | 4986 | if (res) |
@@ -5241,7 +4993,6 @@ static int __init bonding_init(void) | |||
5241 | 4993 | ||
5242 | register_netdevice_notifier(&bond_netdev_notifier); | 4994 | register_netdevice_notifier(&bond_netdev_notifier); |
5243 | register_inetaddr_notifier(&bond_inetaddr_notifier); | 4995 | register_inetaddr_notifier(&bond_inetaddr_notifier); |
5244 | bond_register_ipv6_notifier(); | ||
5245 | out: | 4996 | out: |
5246 | return res; | 4997 | return res; |
5247 | err: | 4998 | err: |
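bonding_init() above registers the pernet subsystem and rtnl link ops, creates debugfs, then creates max_bonds default devices, bailing out through the out/err/err_link labels when a step fails. A minimal sketch of that goto-unwind convention, with placeholder resource names standing in for the real registrations:

#include <stdio.h>

static int register_a(void) { return 0; }	/* e.g. a pernet subsystem */
static void unregister_a(void) { }
static int register_b(void) { return 0; }	/* e.g. rtnl link ops */
static void unregister_b(void) { }
static int create_default_devices(void) { return -1; }	/* force the error path */

/* goto-based unwind: a later failure releases earlier registrations in
 * reverse order, mirroring the err/err_link labels in bonding_init(). */
static int example_init(void)
{
	int res;

	res = register_a();
	if (res)
		goto out;

	res = register_b();
	if (res)
		goto err_a;

	res = create_default_devices();
	if (res)
		goto err_b;

	return 0;

err_b:
	unregister_b();
err_a:
	unregister_a();
out:
	return res;
}

int main(void)
{
	printf("example_init() = %d\n", example_init());
	return 0;
}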
@@ -5256,12 +5007,19 @@ static void __exit bonding_exit(void) | |||
5256 | { | 5007 | { |
5257 | unregister_netdevice_notifier(&bond_netdev_notifier); | 5008 | unregister_netdevice_notifier(&bond_netdev_notifier); |
5258 | unregister_inetaddr_notifier(&bond_inetaddr_notifier); | 5009 | unregister_inetaddr_notifier(&bond_inetaddr_notifier); |
5259 | bond_unregister_ipv6_notifier(); | ||
5260 | 5010 | ||
5261 | bond_destroy_sysfs(); | 5011 | bond_destroy_sysfs(); |
5012 | bond_destroy_debugfs(); | ||
5262 | 5013 | ||
5263 | rtnl_link_unregister(&bond_link_ops); | 5014 | rtnl_link_unregister(&bond_link_ops); |
5264 | unregister_pernet_subsys(&bond_net_ops); | 5015 | unregister_pernet_subsys(&bond_net_ops); |
5016 | |||
5017 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
5018 | /* | ||
5019 | * Make sure we don't have an imbalance on our netpoll blocking | ||
5020 | */ | ||
5021 | WARN_ON(atomic_read(&netpoll_block_tx)); | ||
5022 | #endif | ||
5265 | } | 5023 | } |
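The WARN_ON added at module exit catches any imbalance in the netpoll transmit-blocking counter: every increment taken on a transmit path must have been paired with a decrement by the time the module unloads, so the counter should read zero. A hedged userspace analogue using C11 atomics (the helper names are illustrative, not the driver's):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int block_tx;	/* mirrors the netpoll_block_tx counter */

static void block_example(void)
{
	atomic_fetch_add(&block_tx, 1);
}

static void unblock_example(void)
{
	atomic_fetch_sub(&block_tx, 1);
}

int main(void)
{
	/* every block must be paired with an unblock ... */
	block_example();
	/* ... transmit work happens here ... */
	unblock_example();

	/* ... so the counter is back to zero at teardown, like the WARN_ON check */
	assert(atomic_load(&block_tx) == 0);
	printf("counter balanced: %d\n", atomic_load(&block_tx));
	return 0;
}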
5266 | 5024 | ||
5267 | module_init(bonding_init); | 5025 | module_init(bonding_init); |