Diffstat (limited to 'drivers/net/bonding')
-rw-r--r--   drivers/net/bonding/Makefile        |    6
-rw-r--r--   drivers/net/bonding/bond_3ad.c      |  359
-rw-r--r--   drivers/net/bonding/bond_3ad.h      |   17
-rw-r--r--   drivers/net/bonding/bond_alb.c      |  101
-rw-r--r--   drivers/net/bonding/bond_alb.h      |   47
-rw-r--r--   drivers/net/bonding/bond_debugfs.c  |  146
-rw-r--r--   drivers/net/bonding/bond_ipv6.c     |    7
-rw-r--r--   drivers/net/bonding/bond_main.c     | 1340
-rw-r--r--   drivers/net/bonding/bond_procfs.c   |  273
-rw-r--r--   drivers/net/bonding/bond_sysfs.c    |  165
-rw-r--r--   drivers/net/bonding/bonding.h       |  210
11 files changed, 1423 insertions, 1248 deletions
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 6f9c6faef24c..4c21bf6b8b2f 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,8 +4,8 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_BONDING) += bonding.o | 5 | obj-$(CONFIG_BONDING) += bonding.o |
6 | 6 | ||
7 | bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o | 7 | bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o |
8 | 8 | ||
9 | ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o | 9 | proc-$(CONFIG_PROC_FS) += bond_procfs.o |
10 | bonding-objs += $(ipv6-y) | 10 | bonding-objs += $(proc-y) |
11 | 11 | ||
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0ddf4c66afe2..c7537abca4f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -93,7 +93,7 @@ | |||
93 | // compare MAC addresses | 93 | // compare MAC addresses |
94 | #define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN) | 94 | #define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN) |
95 | 95 | ||
96 | static struct mac_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}}; | 96 | static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } }; |
97 | static u16 ad_ticks_per_sec; | 97 | static u16 ad_ticks_per_sec; |
98 | static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; | 98 | static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; |
99 | 99 | ||
@@ -129,9 +129,8 @@ static void ad_marker_response_received(struct bond_marker *marker, struct port | |||
129 | */ | 129 | */ |
130 | static inline struct bonding *__get_bond_by_port(struct port *port) | 130 | static inline struct bonding *__get_bond_by_port(struct port *port) |
131 | { | 131 | { |
132 | if (port->slave == NULL) { | 132 | if (port->slave == NULL) |
133 | return NULL; | 133 | return NULL; |
134 | } | ||
135 | 134 | ||
136 | return bond_get_bond_by_slave(port->slave); | 135 | return bond_get_bond_by_slave(port->slave); |
137 | } | 136 | } |
@@ -144,9 +143,8 @@ static inline struct bonding *__get_bond_by_port(struct port *port) | |||
144 | */ | 143 | */ |
145 | static inline struct port *__get_first_port(struct bonding *bond) | 144 | static inline struct port *__get_first_port(struct bonding *bond) |
146 | { | 145 | { |
147 | if (bond->slave_cnt == 0) { | 146 | if (bond->slave_cnt == 0) |
148 | return NULL; | 147 | return NULL; |
149 | } | ||
150 | 148 | ||
151 | return &(SLAVE_AD_INFO(bond->first_slave).port); | 149 | return &(SLAVE_AD_INFO(bond->first_slave).port); |
152 | } | 150 | } |
@@ -164,9 +162,8 @@ static inline struct port *__get_next_port(struct port *port) | |||
164 | struct slave *slave = port->slave; | 162 | struct slave *slave = port->slave; |
165 | 163 | ||
166 | // If there's no bond for this port, or this is the last slave | 164 | // If there's no bond for this port, or this is the last slave |
167 | if ((bond == NULL) || (slave->next == bond->first_slave)) { | 165 | if ((bond == NULL) || (slave->next == bond->first_slave)) |
168 | return NULL; | 166 | return NULL; |
169 | } | ||
170 | 167 | ||
171 | return &(SLAVE_AD_INFO(slave->next).port); | 168 | return &(SLAVE_AD_INFO(slave->next).port); |
172 | } | 169 | } |
@@ -183,9 +180,8 @@ static inline struct aggregator *__get_first_agg(struct port *port) | |||
183 | struct bonding *bond = __get_bond_by_port(port); | 180 | struct bonding *bond = __get_bond_by_port(port); |
184 | 181 | ||
185 | // If there's no bond for this port, or bond has no slaves | 182 | // If there's no bond for this port, or bond has no slaves |
186 | if ((bond == NULL) || (bond->slave_cnt == 0)) { | 183 | if ((bond == NULL) || (bond->slave_cnt == 0)) |
187 | return NULL; | 184 | return NULL; |
188 | } | ||
189 | 185 | ||
190 | return &(SLAVE_AD_INFO(bond->first_slave).aggregator); | 186 | return &(SLAVE_AD_INFO(bond->first_slave).aggregator); |
191 | } | 187 | } |
@@ -203,9 +199,8 @@ static inline struct aggregator *__get_next_agg(struct aggregator *aggregator) | |||
203 | struct bonding *bond = bond_get_bond_by_slave(slave); | 199 | struct bonding *bond = bond_get_bond_by_slave(slave); |
204 | 200 | ||
205 | // If there's no bond for this aggregator, or this is the last slave | 201 | // If there's no bond for this aggregator, or this is the last slave |
206 | if ((bond == NULL) || (slave->next == bond->first_slave)) { | 202 | if ((bond == NULL) || (slave->next == bond->first_slave)) |
207 | return NULL; | 203 | return NULL; |
208 | } | ||
209 | 204 | ||
210 | return &(SLAVE_AD_INFO(slave->next).aggregator); | 205 | return &(SLAVE_AD_INFO(slave->next).aggregator); |
211 | } | 206 | } |
@@ -240,9 +235,8 @@ static inline void __enable_port(struct port *port) | |||
240 | { | 235 | { |
241 | struct slave *slave = port->slave; | 236 | struct slave *slave = port->slave; |
242 | 237 | ||
243 | if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) { | 238 | if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) |
244 | bond_set_slave_active_flags(slave); | 239 | bond_set_slave_active_flags(slave); |
245 | } | ||
246 | } | 240 | } |
247 | 241 | ||
248 | /** | 242 | /** |
@@ -252,7 +246,7 @@ static inline void __enable_port(struct port *port) | |||
252 | */ | 246 | */ |
253 | static inline int __port_is_enabled(struct port *port) | 247 | static inline int __port_is_enabled(struct port *port) |
254 | { | 248 | { |
255 | return(port->slave->state == BOND_STATE_ACTIVE); | 249 | return bond_is_active_slave(port->slave); |
256 | } | 250 | } |
257 | 251 | ||
258 | /** | 252 | /** |
@@ -265,9 +259,8 @@ static inline u32 __get_agg_selection_mode(struct port *port) | |||
265 | { | 259 | { |
266 | struct bonding *bond = __get_bond_by_port(port); | 260 | struct bonding *bond = __get_bond_by_port(port); |
267 | 261 | ||
268 | if (bond == NULL) { | 262 | if (bond == NULL) |
269 | return BOND_AD_STABLE; | 263 | return BOND_AD_STABLE; |
270 | } | ||
271 | 264 | ||
272 | return BOND_AD_INFO(bond).agg_select_mode; | 265 | return BOND_AD_INFO(bond).agg_select_mode; |
273 | } | 266 | } |
@@ -281,31 +274,30 @@ static inline int __check_agg_selection_timer(struct port *port) | |||
281 | { | 274 | { |
282 | struct bonding *bond = __get_bond_by_port(port); | 275 | struct bonding *bond = __get_bond_by_port(port); |
283 | 276 | ||
284 | if (bond == NULL) { | 277 | if (bond == NULL) |
285 | return 0; | 278 | return 0; |
286 | } | ||
287 | 279 | ||
288 | return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; | 280 | return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; |
289 | } | 281 | } |
290 | 282 | ||
291 | /** | 283 | /** |
292 | * __get_rx_machine_lock - lock the port's RX machine | 284 | * __get_state_machine_lock - lock the port's state machines |
293 | * @port: the port we're looking at | 285 | * @port: the port we're looking at |
294 | * | 286 | * |
295 | */ | 287 | */ |
296 | static inline void __get_rx_machine_lock(struct port *port) | 288 | static inline void __get_state_machine_lock(struct port *port) |
297 | { | 289 | { |
298 | spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 290 | spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
299 | } | 291 | } |
300 | 292 | ||
301 | /** | 293 | /** |
302 | * __release_rx_machine_lock - unlock the port's RX machine | 294 | * __release_state_machine_lock - unlock the port's state machines |
303 | * @port: the port we're looking at | 295 | * @port: the port we're looking at |
304 | * | 296 | * |
305 | */ | 297 | */ |
306 | static inline void __release_rx_machine_lock(struct port *port) | 298 | static inline void __release_state_machine_lock(struct port *port) |
307 | { | 299 | { |
308 | spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 300 | spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
309 | } | 301 | } |
310 | 302 | ||
311 | /** | 303 | /** |
@@ -328,9 +320,9 @@ static u16 __get_link_speed(struct port *port) | |||
328 | * link down, it sets the speed to 0. | 320 | * link down, it sets the speed to 0. |
329 | * This is done in spite of the fact that the e100 driver reports 0 to be | 321 | * This is done in spite of the fact that the e100 driver reports 0 to be |
330 | * compatible with MVT in the future.*/ | 322 | * compatible with MVT in the future.*/ |
331 | if (slave->link != BOND_LINK_UP) { | 323 | if (slave->link != BOND_LINK_UP) |
332 | speed=0; | 324 | speed = 0; |
333 | } else { | 325 | else { |
334 | switch (slave->speed) { | 326 | switch (slave->speed) { |
335 | case SPEED_10: | 327 | case SPEED_10: |
336 | speed = AD_LINK_SPEED_BITMASK_10MBPS; | 328 | speed = AD_LINK_SPEED_BITMASK_10MBPS; |
@@ -375,18 +367,18 @@ static u8 __get_duplex(struct port *port) | |||
375 | 367 | ||
376 | // handling a special case: when the configuration starts with | 368 | // handling a special case: when the configuration starts with |
377 | // link down, it sets the duplex to 0. | 369 | // link down, it sets the duplex to 0. |
378 | if (slave->link != BOND_LINK_UP) { | 370 | if (slave->link != BOND_LINK_UP) |
379 | retval=0x0; | 371 | retval = 0x0; |
380 | } else { | 372 | else { |
381 | switch (slave->duplex) { | 373 | switch (slave->duplex) { |
382 | case DUPLEX_FULL: | 374 | case DUPLEX_FULL: |
383 | retval=0x1; | 375 | retval = 0x1; |
384 | pr_debug("Port %d Received status full duplex update from adapter\n", | 376 | pr_debug("Port %d Received status full duplex update from adapter\n", |
385 | port->actor_port_number); | 377 | port->actor_port_number); |
386 | break; | 378 | break; |
387 | case DUPLEX_HALF: | 379 | case DUPLEX_HALF: |
388 | default: | 380 | default: |
389 | retval=0x0; | 381 | retval = 0x0; |
390 | pr_debug("Port %d Received status NOT full duplex update from adapter\n", | 382 | pr_debug("Port %d Received status NOT full duplex update from adapter\n", |
391 | port->actor_port_number); | 383 | port->actor_port_number); |
392 | break; | 384 | break; |
@@ -396,14 +388,14 @@ static u8 __get_duplex(struct port *port) | |||
396 | } | 388 | } |
397 | 389 | ||
398 | /** | 390 | /** |
399 | * __initialize_port_locks - initialize a port's RX machine spinlock | 391 | * __initialize_port_locks - initialize a port's STATE machine spinlock |
400 | * @port: the port we're looking at | 392 | * @port: the port we're looking at |
401 | * | 393 | * |
402 | */ | 394 | */ |
403 | static inline void __initialize_port_locks(struct port *port) | 395 | static inline void __initialize_port_locks(struct port *port) |
404 | { | 396 | { |
405 | // make sure it isn't called twice | 397 | // make sure it isn't called twice |
406 | spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 398 | spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
407 | } | 399 | } |
408 | 400 | ||
409 | //conversions | 401 | //conversions |
@@ -419,15 +411,14 @@ static inline void __initialize_port_locks(struct port *port) | |||
419 | */ | 411 | */ |
420 | static u16 __ad_timer_to_ticks(u16 timer_type, u16 par) | 412 | static u16 __ad_timer_to_ticks(u16 timer_type, u16 par) |
421 | { | 413 | { |
422 | u16 retval=0; //to silence the compiler | 414 | u16 retval = 0; /* to silence the compiler */ |
423 | 415 | ||
424 | switch (timer_type) { | 416 | switch (timer_type) { |
425 | case AD_CURRENT_WHILE_TIMER: // for rx machine usage | 417 | case AD_CURRENT_WHILE_TIMER: // for rx machine usage |
426 | if (par) { // for short or long timeout | 418 | if (par) |
427 | retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout | 419 | retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout |
428 | } else { | 420 | else |
429 | retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout | 421 | retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout |
430 | } | ||
431 | break; | 422 | break; |
432 | case AD_ACTOR_CHURN_TIMER: // for local churn machine | 423 | case AD_ACTOR_CHURN_TIMER: // for local churn machine |
433 | retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); | 424 | retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); |
@@ -519,11 +510,11 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port) | |||
519 | port->actor_oper_port_state &= ~AD_STATE_DEFAULTED; | 510 | port->actor_oper_port_state &= ~AD_STATE_DEFAULTED; |
520 | 511 | ||
521 | // set the partner sync. to on if the partner is sync. and the port is matched | 512 | // set the partner sync. to on if the partner is sync. and the port is matched |
522 | if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) { | 513 | if ((port->sm_vars & AD_PORT_MATCHED) |
514 | && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) | ||
523 | partner->port_state |= AD_STATE_SYNCHRONIZATION; | 515 | partner->port_state |= AD_STATE_SYNCHRONIZATION; |
524 | } else { | 516 | else |
525 | partner->port_state &= ~AD_STATE_SYNCHRONIZATION; | 517 | partner->port_state &= ~AD_STATE_SYNCHRONIZATION; |
526 | } | ||
527 | } | 518 | } |
528 | } | 519 | } |
529 | 520 | ||
@@ -653,7 +644,7 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port) | |||
653 | */ | 644 | */ |
654 | static void __attach_bond_to_agg(struct port *port) | 645 | static void __attach_bond_to_agg(struct port *port) |
655 | { | 646 | { |
656 | port=NULL; // just to satisfy the compiler | 647 | port = NULL; /* just to satisfy the compiler */ |
657 | // This function does nothing since the parser/multiplexer of the receive | 648 | // This function does nothing since the parser/multiplexer of the receive |
658 | // and the parser/multiplexer of the aggregator are already combined | 649 | // and the parser/multiplexer of the aggregator are already combined |
659 | } | 650 | } |
@@ -668,7 +659,7 @@ static void __attach_bond_to_agg(struct port *port) | |||
668 | */ | 659 | */ |
669 | static void __detach_bond_from_agg(struct port *port) | 660 | static void __detach_bond_from_agg(struct port *port) |
670 | { | 661 | { |
671 | port=NULL; // just to satisfy the compiler | 662 | port = NULL; /* just to satisfy the compiler */ |
672 | // This function does nothing sience the parser/multiplexer of the receive | 663 | // This function does nothing sience the parser/multiplexer of the receive |
673 | // and the parser/multiplexer of the aggregator are already combined | 664 | // and the parser/multiplexer of the aggregator are already combined |
674 | } | 665 | } |
@@ -685,7 +676,9 @@ static int __agg_ports_are_ready(struct aggregator *aggregator) | |||
685 | 676 | ||
686 | if (aggregator) { | 677 | if (aggregator) { |
687 | // scan all ports in this aggregator to verfy if they are all ready | 678 | // scan all ports in this aggregator to verfy if they are all ready |
688 | for (port=aggregator->lag_ports; port; port=port->next_port_in_aggregator) { | 679 | for (port = aggregator->lag_ports; |
680 | port; | ||
681 | port = port->next_port_in_aggregator) { | ||
689 | if (!(port->sm_vars & AD_PORT_READY_N)) { | 682 | if (!(port->sm_vars & AD_PORT_READY_N)) { |
690 | retval = 0; | 683 | retval = 0; |
691 | break; | 684 | break; |
@@ -706,12 +699,12 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val) | |||
706 | { | 699 | { |
707 | struct port *port; | 700 | struct port *port; |
708 | 701 | ||
709 | for (port=aggregator->lag_ports; port; port=port->next_port_in_aggregator) { | 702 | for (port = aggregator->lag_ports; port; |
710 | if (val) { | 703 | port = port->next_port_in_aggregator) { |
704 | if (val) | ||
711 | port->sm_vars |= AD_PORT_READY; | 705 | port->sm_vars |= AD_PORT_READY; |
712 | } else { | 706 | else |
713 | port->sm_vars &= ~AD_PORT_READY; | 707 | port->sm_vars &= ~AD_PORT_READY; |
714 | } | ||
715 | } | 708 | } |
716 | } | 709 | } |
717 | 710 | ||
@@ -722,12 +715,10 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val) | |||
722 | */ | 715 | */ |
723 | static u32 __get_agg_bandwidth(struct aggregator *aggregator) | 716 | static u32 __get_agg_bandwidth(struct aggregator *aggregator) |
724 | { | 717 | { |
725 | u32 bandwidth=0; | 718 | u32 bandwidth = 0; |
726 | u32 basic_speed; | ||
727 | 719 | ||
728 | if (aggregator->num_of_ports) { | 720 | if (aggregator->num_of_ports) { |
729 | basic_speed = __get_link_speed(aggregator->lag_ports); | 721 | switch (__get_link_speed(aggregator->lag_ports)) { |
730 | switch (basic_speed) { | ||
731 | case AD_LINK_SPEED_BITMASK_1MBPS: | 722 | case AD_LINK_SPEED_BITMASK_1MBPS: |
732 | bandwidth = aggregator->num_of_ports; | 723 | bandwidth = aggregator->num_of_ports; |
733 | break; | 724 | break; |
@@ -744,7 +735,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) | |||
744 | bandwidth = aggregator->num_of_ports * 10000; | 735 | bandwidth = aggregator->num_of_ports * 10000; |
745 | break; | 736 | break; |
746 | default: | 737 | default: |
747 | bandwidth=0; // to silent the compilor .... | 738 | bandwidth = 0; /*to silence the compiler ....*/ |
748 | } | 739 | } |
749 | } | 740 | } |
750 | return bandwidth; | 741 | return bandwidth; |
@@ -835,9 +826,8 @@ static int ad_lacpdu_send(struct port *port) | |||
835 | int length = sizeof(struct lacpdu_header); | 826 | int length = sizeof(struct lacpdu_header); |
836 | 827 | ||
837 | skb = dev_alloc_skb(length); | 828 | skb = dev_alloc_skb(length); |
838 | if (!skb) { | 829 | if (!skb) |
839 | return -ENOMEM; | 830 | return -ENOMEM; |
840 | } | ||
841 | 831 | ||
842 | skb->dev = slave->dev; | 832 | skb->dev = slave->dev; |
843 | skb_reset_mac_header(skb); | 833 | skb_reset_mac_header(skb); |
@@ -848,7 +838,7 @@ static int ad_lacpdu_send(struct port *port) | |||
848 | lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); | 838 | lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); |
849 | 839 | ||
850 | memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); | 840 | memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); |
851 | /* Note: source addres is set to be the member's PERMANENT address, | 841 | /* Note: source address is set to be the member's PERMANENT address, |
852 | because we use it to identify loopback lacpdus in receive. */ | 842 | because we use it to identify loopback lacpdus in receive. */ |
853 | memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); | 843 | memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); |
854 | lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; | 844 | lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; |
@@ -876,9 +866,8 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) | |||
876 | int length = sizeof(struct bond_marker_header); | 866 | int length = sizeof(struct bond_marker_header); |
877 | 867 | ||
878 | skb = dev_alloc_skb(length + 16); | 868 | skb = dev_alloc_skb(length + 16); |
879 | if (!skb) { | 869 | if (!skb) |
880 | return -ENOMEM; | 870 | return -ENOMEM; |
881 | } | ||
882 | 871 | ||
883 | skb_reserve(skb, 16); | 872 | skb_reserve(skb, 16); |
884 | 873 | ||
@@ -890,7 +879,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) | |||
890 | marker_header = (struct bond_marker_header *)skb_put(skb, length); | 879 | marker_header = (struct bond_marker_header *)skb_put(skb, length); |
891 | 880 | ||
892 | memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); | 881 | memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); |
893 | /* Note: source addres is set to be the member's PERMANENT address, | 882 | /* Note: source address is set to be the member's PERMANENT address, |
894 | because we use it to identify loopback MARKERs in receive. */ | 883 | because we use it to identify loopback MARKERs in receive. */ |
895 | memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); | 884 | memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); |
896 | marker_header->hdr.h_proto = PKT_TYPE_LACPDU; | 885 | marker_header->hdr.h_proto = PKT_TYPE_LACPDU; |
@@ -919,9 +908,10 @@ static void ad_mux_machine(struct port *port) | |||
919 | } else { | 908 | } else { |
920 | switch (port->sm_mux_state) { | 909 | switch (port->sm_mux_state) { |
921 | case AD_MUX_DETACHED: | 910 | case AD_MUX_DETACHED: |
922 | if ((port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if SELECTED or STANDBY | 911 | if ((port->sm_vars & AD_PORT_SELECTED) |
912 | || (port->sm_vars & AD_PORT_STANDBY)) | ||
913 | /* if SELECTED or STANDBY */ | ||
923 | port->sm_mux_state = AD_MUX_WAITING; // next state | 914 | port->sm_mux_state = AD_MUX_WAITING; // next state |
924 | } | ||
925 | break; | 915 | break; |
926 | case AD_MUX_WAITING: | 916 | case AD_MUX_WAITING: |
927 | // if SELECTED == FALSE return to DETACH state | 917 | // if SELECTED == FALSE return to DETACH state |
@@ -935,18 +925,18 @@ static void ad_mux_machine(struct port *port) | |||
935 | } | 925 | } |
936 | 926 | ||
937 | // check if the wait_while_timer expired | 927 | // check if the wait_while_timer expired |
938 | if (port->sm_mux_timer_counter && !(--port->sm_mux_timer_counter)) { | 928 | if (port->sm_mux_timer_counter |
929 | && !(--port->sm_mux_timer_counter)) | ||
939 | port->sm_vars |= AD_PORT_READY_N; | 930 | port->sm_vars |= AD_PORT_READY_N; |
940 | } | ||
941 | 931 | ||
942 | // in order to withhold the selection logic to check all ports READY_N value | 932 | // in order to withhold the selection logic to check all ports READY_N value |
943 | // every callback cycle to update ready variable, we check READY_N and update READY here | 933 | // every callback cycle to update ready variable, we check READY_N and update READY here |
944 | __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); | 934 | __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); |
945 | 935 | ||
946 | // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state | 936 | // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state |
947 | if ((port->sm_vars & AD_PORT_READY) && !port->sm_mux_timer_counter) { | 937 | if ((port->sm_vars & AD_PORT_READY) |
938 | && !port->sm_mux_timer_counter) | ||
948 | port->sm_mux_state = AD_MUX_ATTACHED; // next state | 939 | port->sm_mux_state = AD_MUX_ATTACHED; // next state |
949 | } | ||
950 | break; | 940 | break; |
951 | case AD_MUX_ATTACHED: | 941 | case AD_MUX_ATTACHED: |
952 | // check also if agg_select_timer expired(so the edable port will take place only after this timer) | 942 | // check also if agg_select_timer expired(so the edable port will take place only after this timer) |
@@ -1033,21 +1023,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1033 | { | 1023 | { |
1034 | rx_states_t last_state; | 1024 | rx_states_t last_state; |
1035 | 1025 | ||
1036 | // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback) | ||
1037 | __get_rx_machine_lock(port); | ||
1038 | |||
1039 | // keep current State Machine state to compare later if it was changed | 1026 | // keep current State Machine state to compare later if it was changed |
1040 | last_state = port->sm_rx_state; | 1027 | last_state = port->sm_rx_state; |
1041 | 1028 | ||
1042 | // check if state machine should change state | 1029 | // check if state machine should change state |
1043 | // first, check if port was reinitialized | 1030 | // first, check if port was reinitialized |
1044 | if (port->sm_vars & AD_PORT_BEGIN) { | 1031 | if (port->sm_vars & AD_PORT_BEGIN) |
1045 | port->sm_rx_state = AD_RX_INITIALIZE; // next state | 1032 | /* next state */ |
1046 | } | 1033 | port->sm_rx_state = AD_RX_INITIALIZE; |
1047 | // check if port is not enabled | 1034 | // check if port is not enabled |
1048 | else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) { | 1035 | else if (!(port->sm_vars & AD_PORT_BEGIN) |
1049 | port->sm_rx_state = AD_RX_PORT_DISABLED; // next state | 1036 | && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) |
1050 | } | 1037 | /* next state */ |
1038 | port->sm_rx_state = AD_RX_PORT_DISABLED; | ||
1051 | // check if new lacpdu arrived | 1039 | // check if new lacpdu arrived |
1052 | else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) { | 1040 | else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) { |
1053 | port->sm_rx_timer_counter = 0; // zero timer | 1041 | port->sm_rx_timer_counter = 0; // zero timer |
@@ -1069,13 +1057,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1069 | // if no lacpdu arrived and no timer is on | 1057 | // if no lacpdu arrived and no timer is on |
1070 | switch (port->sm_rx_state) { | 1058 | switch (port->sm_rx_state) { |
1071 | case AD_RX_PORT_DISABLED: | 1059 | case AD_RX_PORT_DISABLED: |
1072 | if (port->sm_vars & AD_PORT_MOVED) { | 1060 | if (port->sm_vars & AD_PORT_MOVED) |
1073 | port->sm_rx_state = AD_RX_INITIALIZE; // next state | 1061 | port->sm_rx_state = AD_RX_INITIALIZE; // next state |
1074 | } else if (port->is_enabled && (port->sm_vars & AD_PORT_LACP_ENABLED)) { | 1062 | else if (port->is_enabled |
1063 | && (port->sm_vars | ||
1064 | & AD_PORT_LACP_ENABLED)) | ||
1075 | port->sm_rx_state = AD_RX_EXPIRED; // next state | 1065 | port->sm_rx_state = AD_RX_EXPIRED; // next state |
1076 | } else if (port->is_enabled && ((port->sm_vars & AD_PORT_LACP_ENABLED) == 0)) { | 1066 | else if (port->is_enabled |
1067 | && ((port->sm_vars | ||
1068 | & AD_PORT_LACP_ENABLED) == 0)) | ||
1077 | port->sm_rx_state = AD_RX_LACP_DISABLED; // next state | 1069 | port->sm_rx_state = AD_RX_LACP_DISABLED; // next state |
1078 | } | ||
1079 | break; | 1070 | break; |
1080 | default: //to silence the compiler | 1071 | default: //to silence the compiler |
1081 | break; | 1072 | break; |
@@ -1091,11 +1082,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1091 | port->sm_rx_state); | 1082 | port->sm_rx_state); |
1092 | switch (port->sm_rx_state) { | 1083 | switch (port->sm_rx_state) { |
1093 | case AD_RX_INITIALIZE: | 1084 | case AD_RX_INITIALIZE: |
1094 | if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) { | 1085 | if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) |
1095 | port->sm_vars &= ~AD_PORT_LACP_ENABLED; | 1086 | port->sm_vars &= ~AD_PORT_LACP_ENABLED; |
1096 | } else { | 1087 | else |
1097 | port->sm_vars |= AD_PORT_LACP_ENABLED; | 1088 | port->sm_vars |= AD_PORT_LACP_ENABLED; |
1098 | } | ||
1099 | port->sm_vars &= ~AD_PORT_SELECTED; | 1089 | port->sm_vars &= ~AD_PORT_SELECTED; |
1100 | __record_default(port); | 1090 | __record_default(port); |
1101 | port->actor_oper_port_state &= ~AD_STATE_EXPIRED; | 1091 | port->actor_oper_port_state &= ~AD_STATE_EXPIRED; |
@@ -1138,7 +1128,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1138 | pr_err("%s: An illegal loopback occurred on adapter (%s).\n" | 1128 | pr_err("%s: An illegal loopback occurred on adapter (%s).\n" |
1139 | "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", | 1129 | "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", |
1140 | port->slave->dev->master->name, port->slave->dev->name); | 1130 | port->slave->dev->master->name, port->slave->dev->name); |
1141 | __release_rx_machine_lock(port); | ||
1142 | return; | 1131 | return; |
1143 | } | 1132 | } |
1144 | __update_selected(lacpdu, port); | 1133 | __update_selected(lacpdu, port); |
@@ -1149,15 +1138,15 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1149 | // verify that if the aggregator is enabled, the port is enabled too. | 1138 | // verify that if the aggregator is enabled, the port is enabled too. |
1150 | //(because if the link goes down for a short time, the 802.3ad will not | 1139 | //(because if the link goes down for a short time, the 802.3ad will not |
1151 | // catch it, and the port will continue to be disabled) | 1140 | // catch it, and the port will continue to be disabled) |
1152 | if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { | 1141 | if (port->aggregator |
1142 | && port->aggregator->is_active | ||
1143 | && !__port_is_enabled(port)) | ||
1153 | __enable_port(port); | 1144 | __enable_port(port); |
1154 | } | ||
1155 | break; | 1145 | break; |
1156 | default: //to silence the compiler | 1146 | default: //to silence the compiler |
1157 | break; | 1147 | break; |
1158 | } | 1148 | } |
1159 | } | 1149 | } |
1160 | __release_rx_machine_lock(port); | ||
1161 | } | 1150 | } |
1162 | 1151 | ||
1163 | /** | 1152 | /** |
@@ -1183,7 +1172,8 @@ static void ad_tx_machine(struct port *port) | |||
1183 | } | 1172 | } |
1184 | } | 1173 | } |
1185 | // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND | 1174 | // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND |
1186 | port->sm_tx_timer_counter=ad_ticks_per_sec/AD_MAX_TX_IN_SECOND; | 1175 | port->sm_tx_timer_counter = |
1176 | ad_ticks_per_sec/AD_MAX_TX_IN_SECOND; | ||
1187 | } | 1177 | } |
1188 | } | 1178 | } |
1189 | 1179 | ||
@@ -1216,9 +1206,9 @@ static void ad_periodic_machine(struct port *port) | |||
1216 | // If not expired, check if there is some new timeout parameter from the partner state | 1206 | // If not expired, check if there is some new timeout parameter from the partner state |
1217 | switch (port->sm_periodic_state) { | 1207 | switch (port->sm_periodic_state) { |
1218 | case AD_FAST_PERIODIC: | 1208 | case AD_FAST_PERIODIC: |
1219 | if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { | 1209 | if (!(port->partner_oper.port_state |
1210 | & AD_STATE_LACP_TIMEOUT)) | ||
1220 | port->sm_periodic_state = AD_SLOW_PERIODIC; // next state | 1211 | port->sm_periodic_state = AD_SLOW_PERIODIC; // next state |
1221 | } | ||
1222 | break; | 1212 | break; |
1223 | case AD_SLOW_PERIODIC: | 1213 | case AD_SLOW_PERIODIC: |
1224 | if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { | 1214 | if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { |
@@ -1237,11 +1227,11 @@ static void ad_periodic_machine(struct port *port) | |||
1237 | port->sm_periodic_state = AD_FAST_PERIODIC; // next state | 1227 | port->sm_periodic_state = AD_FAST_PERIODIC; // next state |
1238 | break; | 1228 | break; |
1239 | case AD_PERIODIC_TX: | 1229 | case AD_PERIODIC_TX: |
1240 | if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { | 1230 | if (!(port->partner_oper.port_state |
1231 | & AD_STATE_LACP_TIMEOUT)) | ||
1241 | port->sm_periodic_state = AD_SLOW_PERIODIC; // next state | 1232 | port->sm_periodic_state = AD_SLOW_PERIODIC; // next state |
1242 | } else { | 1233 | else |
1243 | port->sm_periodic_state = AD_FAST_PERIODIC; // next state | 1234 | port->sm_periodic_state = AD_FAST_PERIODIC; // next state |
1244 | } | ||
1245 | break; | 1235 | break; |
1246 | default: //to silence the compiler | 1236 | default: //to silence the compiler |
1247 | break; | 1237 | break; |
@@ -1287,35 +1277,37 @@ static void ad_port_selection_logic(struct port *port) | |||
1287 | int found = 0; | 1277 | int found = 0; |
1288 | 1278 | ||
1289 | // if the port is already Selected, do nothing | 1279 | // if the port is already Selected, do nothing |
1290 | if (port->sm_vars & AD_PORT_SELECTED) { | 1280 | if (port->sm_vars & AD_PORT_SELECTED) |
1291 | return; | 1281 | return; |
1292 | } | ||
1293 | 1282 | ||
1294 | // if the port is connected to other aggregator, detach it | 1283 | // if the port is connected to other aggregator, detach it |
1295 | if (port->aggregator) { | 1284 | if (port->aggregator) { |
1296 | // detach the port from its former aggregator | 1285 | // detach the port from its former aggregator |
1297 | temp_aggregator=port->aggregator; | 1286 | temp_aggregator = port->aggregator; |
1298 | for (curr_port=temp_aggregator->lag_ports; curr_port; last_port=curr_port, curr_port=curr_port->next_port_in_aggregator) { | 1287 | for (curr_port = temp_aggregator->lag_ports; curr_port; |
1288 | last_port = curr_port, | ||
1289 | curr_port = curr_port->next_port_in_aggregator) { | ||
1299 | if (curr_port == port) { | 1290 | if (curr_port == port) { |
1300 | temp_aggregator->num_of_ports--; | 1291 | temp_aggregator->num_of_ports--; |
1301 | if (!last_port) {// if it is the first port attached to the aggregator | 1292 | if (!last_port) {// if it is the first port attached to the aggregator |
1302 | temp_aggregator->lag_ports=port->next_port_in_aggregator; | 1293 | temp_aggregator->lag_ports = |
1294 | port->next_port_in_aggregator; | ||
1303 | } else {// not the first port attached to the aggregator | 1295 | } else {// not the first port attached to the aggregator |
1304 | last_port->next_port_in_aggregator=port->next_port_in_aggregator; | 1296 | last_port->next_port_in_aggregator = |
1297 | port->next_port_in_aggregator; | ||
1305 | } | 1298 | } |
1306 | 1299 | ||
1307 | // clear the port's relations to this aggregator | 1300 | // clear the port's relations to this aggregator |
1308 | port->aggregator = NULL; | 1301 | port->aggregator = NULL; |
1309 | port->next_port_in_aggregator=NULL; | 1302 | port->next_port_in_aggregator = NULL; |
1310 | port->actor_port_aggregator_identifier=0; | 1303 | port->actor_port_aggregator_identifier = 0; |
1311 | 1304 | ||
1312 | pr_debug("Port %d left LAG %d\n", | 1305 | pr_debug("Port %d left LAG %d\n", |
1313 | port->actor_port_number, | 1306 | port->actor_port_number, |
1314 | temp_aggregator->aggregator_identifier); | 1307 | temp_aggregator->aggregator_identifier); |
1315 | // if the aggregator is empty, clear its parameters, and set it ready to be attached | 1308 | // if the aggregator is empty, clear its parameters, and set it ready to be attached |
1316 | if (!temp_aggregator->lag_ports) { | 1309 | if (!temp_aggregator->lag_ports) |
1317 | ad_clear_agg(temp_aggregator); | 1310 | ad_clear_agg(temp_aggregator); |
1318 | } | ||
1319 | break; | 1311 | break; |
1320 | } | 1312 | } |
1321 | } | 1313 | } |
@@ -1333,9 +1325,8 @@ static void ad_port_selection_logic(struct port *port) | |||
1333 | 1325 | ||
1334 | // keep a free aggregator for later use(if needed) | 1326 | // keep a free aggregator for later use(if needed) |
1335 | if (!aggregator->lag_ports) { | 1327 | if (!aggregator->lag_ports) { |
1336 | if (!free_aggregator) { | 1328 | if (!free_aggregator) |
1337 | free_aggregator=aggregator; | 1329 | free_aggregator = aggregator; |
1338 | } | ||
1339 | continue; | 1330 | continue; |
1340 | } | 1331 | } |
1341 | // check if current aggregator suits us | 1332 | // check if current aggregator suits us |
@@ -1350,10 +1341,11 @@ static void ad_port_selection_logic(struct port *port) | |||
1350 | ) { | 1341 | ) { |
1351 | // attach to the founded aggregator | 1342 | // attach to the founded aggregator |
1352 | port->aggregator = aggregator; | 1343 | port->aggregator = aggregator; |
1353 | port->actor_port_aggregator_identifier=port->aggregator->aggregator_identifier; | 1344 | port->actor_port_aggregator_identifier = |
1354 | port->next_port_in_aggregator=aggregator->lag_ports; | 1345 | port->aggregator->aggregator_identifier; |
1346 | port->next_port_in_aggregator = aggregator->lag_ports; | ||
1355 | port->aggregator->num_of_ports++; | 1347 | port->aggregator->num_of_ports++; |
1356 | aggregator->lag_ports=port; | 1348 | aggregator->lag_ports = port; |
1357 | pr_debug("Port %d joined LAG %d(existing LAG)\n", | 1349 | pr_debug("Port %d joined LAG %d(existing LAG)\n", |
1358 | port->actor_port_number, | 1350 | port->actor_port_number, |
1359 | port->aggregator->aggregator_identifier); | 1351 | port->aggregator->aggregator_identifier); |
@@ -1370,20 +1362,23 @@ static void ad_port_selection_logic(struct port *port) | |||
1370 | if (free_aggregator) { | 1362 | if (free_aggregator) { |
1371 | // assign port a new aggregator | 1363 | // assign port a new aggregator |
1372 | port->aggregator = free_aggregator; | 1364 | port->aggregator = free_aggregator; |
1373 | port->actor_port_aggregator_identifier=port->aggregator->aggregator_identifier; | 1365 | port->actor_port_aggregator_identifier = |
1366 | port->aggregator->aggregator_identifier; | ||
1374 | 1367 | ||
1375 | // update the new aggregator's parameters | 1368 | // update the new aggregator's parameters |
1376 | // if port was responsed from the end-user | 1369 | // if port was responsed from the end-user |
1377 | if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) {// if port is full duplex | 1370 | if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) |
1371 | /* if port is full duplex */ | ||
1378 | port->aggregator->is_individual = false; | 1372 | port->aggregator->is_individual = false; |
1379 | } else { | 1373 | else |
1380 | port->aggregator->is_individual = true; | 1374 | port->aggregator->is_individual = true; |
1381 | } | ||
1382 | 1375 | ||
1383 | port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key; | 1376 | port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key; |
1384 | port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key; | 1377 | port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key; |
1385 | port->aggregator->partner_system=port->partner_oper.system; | 1378 | port->aggregator->partner_system = |
1386 | port->aggregator->partner_system_priority = port->partner_oper.system_priority; | 1379 | port->partner_oper.system; |
1380 | port->aggregator->partner_system_priority = | ||
1381 | port->partner_oper.system_priority; | ||
1387 | port->aggregator->partner_oper_aggregator_key = port->partner_oper.key; | 1382 | port->aggregator->partner_oper_aggregator_key = port->partner_oper.key; |
1388 | port->aggregator->receive_state = 1; | 1383 | port->aggregator->receive_state = 1; |
1389 | port->aggregator->transmit_state = 1; | 1384 | port->aggregator->transmit_state = 1; |
@@ -1485,8 +1480,11 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, | |||
1485 | 1480 | ||
1486 | static int agg_device_up(const struct aggregator *agg) | 1481 | static int agg_device_up(const struct aggregator *agg) |
1487 | { | 1482 | { |
1488 | return (netif_running(agg->slave->dev) && | 1483 | struct port *port = agg->lag_ports; |
1489 | netif_carrier_ok(agg->slave->dev)); | 1484 | if (!port) |
1485 | return 0; | ||
1486 | return (netif_running(port->slave->dev) && | ||
1487 | netif_carrier_ok(port->slave->dev)); | ||
1490 | } | 1488 | } |
1491 | 1489 | ||
1492 | /** | 1490 | /** |
@@ -1704,9 +1702,8 @@ static void ad_initialize_port(struct port *port, int lacp_fast) | |||
1704 | port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; | 1702 | port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; |
1705 | port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; | 1703 | port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; |
1706 | 1704 | ||
1707 | if (lacp_fast) { | 1705 | if (lacp_fast) |
1708 | port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; | 1706 | port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; |
1709 | } | ||
1710 | 1707 | ||
1711 | memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); | 1708 | memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); |
1712 | memcpy(&port->partner_oper, &tmpl, sizeof(tmpl)); | 1709 | memcpy(&port->partner_oper, &tmpl, sizeof(tmpl)); |
@@ -1785,13 +1782,16 @@ static void ad_marker_info_send(struct port *port) | |||
1785 | marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8)); | 1782 | marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8)); |
1786 | marker.requester_system = port->actor_system; | 1783 | marker.requester_system = port->actor_system; |
1787 | // convert requester_port(u32) to Big Endian | 1784 | // convert requester_port(u32) to Big Endian |
1788 | marker.requester_transaction_id = (((++port->transaction_id & 0xFF) << 24) |((port->transaction_id & 0xFF00) << 8) |((port->transaction_id & 0xFF0000) >> 8) |((port->transaction_id & 0xFF000000) >> 24)); | 1785 | marker.requester_transaction_id = |
1786 | (((++port->transaction_id & 0xFF) << 24) | ||
1787 | | ((port->transaction_id & 0xFF00) << 8) | ||
1788 | | ((port->transaction_id & 0xFF0000) >> 8) | ||
1789 | | ((port->transaction_id & 0xFF000000) >> 24)); | ||
1789 | marker.pad = 0; | 1790 | marker.pad = 0; |
1790 | marker.tlv_type_terminator = 0x00; | 1791 | marker.tlv_type_terminator = 0x00; |
1791 | marker.terminator_length = 0x00; | 1792 | marker.terminator_length = 0x00; |
1792 | for (index=0; index<90; index++) { | 1793 | for (index = 0; index < 90; index++) |
1793 | marker.reserved_90[index]=0; | 1794 | marker.reserved_90[index] = 0; |
1794 | } | ||
1795 | 1795 | ||
1796 | // send the marker information | 1796 | // send the marker information |
1797 | if (ad_marker_send(port, &marker) >= 0) { | 1797 | if (ad_marker_send(port, &marker) >= 0) { |
@@ -1816,7 +1816,7 @@ static void ad_marker_info_received(struct bond_marker *marker_info, | |||
1816 | //marker = *marker_info; | 1816 | //marker = *marker_info; |
1817 | memcpy(&marker, marker_info, sizeof(struct bond_marker)); | 1817 | memcpy(&marker, marker_info, sizeof(struct bond_marker)); |
1818 | // change the marker subtype to marker response | 1818 | // change the marker subtype to marker response |
1819 | marker.tlv_type=AD_MARKER_RESPONSE_SUBTYPE; | 1819 | marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE; |
1820 | // send the marker response | 1820 | // send the marker response |
1821 | 1821 | ||
1822 | if (ad_marker_send(port, &marker) >= 0) { | 1822 | if (ad_marker_send(port, &marker) >= 0) { |
@@ -1837,8 +1837,8 @@ static void ad_marker_info_received(struct bond_marker *marker_info, | |||
1837 | static void ad_marker_response_received(struct bond_marker *marker, | 1837 | static void ad_marker_response_received(struct bond_marker *marker, |
1838 | struct port *port) | 1838 | struct port *port) |
1839 | { | 1839 | { |
1840 | marker=NULL; // just to satisfy the compiler | 1840 | marker = NULL; /* just to satisfy the compiler */ |
1841 | port=NULL; // just to satisfy the compiler | 1841 | port = NULL; /* just to satisfy the compiler */ |
1842 | // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW | 1842 | // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW |
1843 | } | 1843 | } |
1844 | 1844 | ||
@@ -1912,7 +1912,7 @@ int bond_3ad_bind_slave(struct slave *slave) | |||
1912 | return -1; | 1912 | return -1; |
1913 | } | 1913 | } |
1914 | 1914 | ||
1915 | //check that the slave has not been intialized yet. | 1915 | //check that the slave has not been initialized yet. |
1916 | if (SLAVE_AD_INFO(slave).port.slave != slave) { | 1916 | if (SLAVE_AD_INFO(slave).port.slave != slave) { |
1917 | 1917 | ||
1918 | // port initialization | 1918 | // port initialization |
@@ -1932,9 +1932,8 @@ int bond_3ad_bind_slave(struct slave *slave) | |||
1932 | port->actor_admin_port_key |= (__get_link_speed(port) << 1); | 1932 | port->actor_admin_port_key |= (__get_link_speed(port) << 1); |
1933 | port->actor_oper_port_key = port->actor_admin_port_key; | 1933 | port->actor_oper_port_key = port->actor_admin_port_key; |
1934 | // if the port is not full duplex, then the port should be not lacp Enabled | 1934 | // if the port is not full duplex, then the port should be not lacp Enabled |
1935 | if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) { | 1935 | if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) |
1936 | port->sm_vars &= ~AD_PORT_LACP_ENABLED; | 1936 | port->sm_vars &= ~AD_PORT_LACP_ENABLED; |
1937 | } | ||
1938 | // actor system is the bond's system | 1937 | // actor system is the bond's system |
1939 | port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; | 1938 | port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; |
1940 | // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) | 1939 | // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) |
@@ -2006,9 +2005,10 @@ void bond_3ad_unbind_slave(struct slave *slave) | |||
2006 | new_aggregator = __get_first_agg(port); | 2005 | new_aggregator = __get_first_agg(port); |
2007 | for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) { | 2006 | for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) { |
2008 | // if the new aggregator is empty, or it is connected to our port only | 2007 | // if the new aggregator is empty, or it is connected to our port only |
2009 | if (!new_aggregator->lag_ports || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator)) { | 2008 | if (!new_aggregator->lag_ports |
2009 | || ((new_aggregator->lag_ports == port) | ||
2010 | && !new_aggregator->lag_ports->next_port_in_aggregator)) | ||
2010 | break; | 2011 | break; |
2011 | } | ||
2012 | } | 2012 | } |
2013 | // if new aggregator found, copy the aggregator's parameters | 2013 | // if new aggregator found, copy the aggregator's parameters |
2014 | // and connect the related lag_ports to the new aggregator | 2014 | // and connect the related lag_ports to the new aggregator |
@@ -2037,17 +2037,17 @@ void bond_3ad_unbind_slave(struct slave *slave) | |||
2037 | new_aggregator->num_of_ports = aggregator->num_of_ports; | 2037 | new_aggregator->num_of_ports = aggregator->num_of_ports; |
2038 | 2038 | ||
2039 | // update the information that is written on the ports about the aggregator | 2039 | // update the information that is written on the ports about the aggregator |
2040 | for (temp_port=aggregator->lag_ports; temp_port; temp_port=temp_port->next_port_in_aggregator) { | 2040 | for (temp_port = aggregator->lag_ports; temp_port; |
2041 | temp_port->aggregator=new_aggregator; | 2041 | temp_port = temp_port->next_port_in_aggregator) { |
2042 | temp_port->aggregator = new_aggregator; | ||
2042 | temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier; | 2043 | temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier; |
2043 | } | 2044 | } |
2044 | 2045 | ||
2045 | // clear the aggregator | 2046 | // clear the aggregator |
2046 | ad_clear_agg(aggregator); | 2047 | ad_clear_agg(aggregator); |
2047 | 2048 | ||
2048 | if (select_new_active_agg) { | 2049 | if (select_new_active_agg) |
2049 | ad_agg_selection_logic(__get_first_agg(port)); | 2050 | ad_agg_selection_logic(__get_first_agg(port)); |
2050 | } | ||
2051 | } else { | 2051 | } else { |
2052 | pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n", | 2052 | pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n", |
2053 | slave->dev->master->name); | 2053 | slave->dev->master->name); |
@@ -2071,15 +2071,16 @@ void bond_3ad_unbind_slave(struct slave *slave) | |||
2071 | for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) { | 2071 | for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) { |
2072 | prev_port = NULL; | 2072 | prev_port = NULL; |
2073 | // search the port in the aggregator's related ports | 2073 | // search the port in the aggregator's related ports |
2074 | for (temp_port=temp_aggregator->lag_ports; temp_port; prev_port=temp_port, temp_port=temp_port->next_port_in_aggregator) { | 2074 | for (temp_port = temp_aggregator->lag_ports; temp_port; |
2075 | prev_port = temp_port, | ||
2076 | temp_port = temp_port->next_port_in_aggregator) { | ||
2075 | if (temp_port == port) { // the aggregator found - detach the port from this aggregator | 2077 | if (temp_port == port) { // the aggregator found - detach the port from this aggregator |
2076 | if (prev_port) { | 2078 | if (prev_port) |
2077 | prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator; | 2079 | prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator; |
2078 | } else { | 2080 | else |
2079 | temp_aggregator->lag_ports = temp_port->next_port_in_aggregator; | 2081 | temp_aggregator->lag_ports = temp_port->next_port_in_aggregator; |
2080 | } | ||
2081 | temp_aggregator->num_of_ports--; | 2082 | temp_aggregator->num_of_ports--; |
2082 | if (temp_aggregator->num_of_ports==0) { | 2083 | if (temp_aggregator->num_of_ports == 0) { |
2083 | select_new_active_agg = temp_aggregator->is_active; | 2084 | select_new_active_agg = temp_aggregator->is_active; |
2084 | // clear the aggregator | 2085 | // clear the aggregator |
2085 | ad_clear_agg(temp_aggregator); | 2086 | ad_clear_agg(temp_aggregator); |
@@ -2094,7 +2095,7 @@ void bond_3ad_unbind_slave(struct slave *slave) | |||
2094 | } | 2095 | } |
2095 | } | 2096 | } |
2096 | } | 2097 | } |
2097 | port->slave=NULL; | 2098 | port->slave = NULL; |
2098 | } | 2099 | } |
2099 | 2100 | ||
2100 | /** | 2101 | /** |
@@ -2119,14 +2120,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2119 | 2120 | ||
2120 | read_lock(&bond->lock); | 2121 | read_lock(&bond->lock); |
2121 | 2122 | ||
2122 | if (bond->kill_timers) { | 2123 | if (bond->kill_timers) |
2123 | goto out; | 2124 | goto out; |
2124 | } | ||
2125 | 2125 | ||
2126 | //check if there are any slaves | 2126 | //check if there are any slaves |
2127 | if (bond->slave_cnt == 0) { | 2127 | if (bond->slave_cnt == 0) |
2128 | goto re_arm; | 2128 | goto re_arm; |
2129 | } | ||
2130 | 2129 | ||
2131 | // check if agg_select_timer timer after initialize is timed out | 2130 | // check if agg_select_timer timer after initialize is timed out |
2132 | if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) { | 2131 | if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) { |
@@ -2152,6 +2151,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2152 | goto re_arm; | 2151 | goto re_arm; |
2153 | } | 2152 | } |
2154 | 2153 | ||
2154 | /* Lock around state machines to protect data accessed | ||
2155 | * by all (e.g., port->sm_vars). ad_rx_machine may run | ||
2156 | * concurrently due to incoming LACPDU. | ||
2157 | */ | ||
2158 | __get_state_machine_lock(port); | ||
2159 | |||
2155 | ad_rx_machine(NULL, port); | 2160 | ad_rx_machine(NULL, port); |
2156 | ad_periodic_machine(port); | 2161 | ad_periodic_machine(port); |
2157 | ad_port_selection_logic(port); | 2162 | ad_port_selection_logic(port); |
@@ -2159,9 +2164,10 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2159 | ad_tx_machine(port); | 2164 | ad_tx_machine(port); |
2160 | 2165 | ||
2161 | // turn off the BEGIN bit, since we already handled it | 2166 | // turn off the BEGIN bit, since we already handled it |
2162 | if (port->sm_vars & AD_PORT_BEGIN) { | 2167 | if (port->sm_vars & AD_PORT_BEGIN) |
2163 | port->sm_vars &= ~AD_PORT_BEGIN; | 2168 | port->sm_vars &= ~AD_PORT_BEGIN; |
2164 | } | 2169 | |
2170 | __release_state_machine_lock(port); | ||
2165 | } | 2171 | } |
2166 | 2172 | ||
2167 | re_arm: | 2173 | re_arm: |
@@ -2198,7 +2204,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u | |||
2198 | case AD_TYPE_LACPDU: | 2204 | case AD_TYPE_LACPDU: |
2199 | pr_debug("Received LACPDU on port %d\n", | 2205 | pr_debug("Received LACPDU on port %d\n", |
2200 | port->actor_port_number); | 2206 | port->actor_port_number); |
2207 | /* Protect against concurrent state machines */ | ||
2208 | __get_state_machine_lock(port); | ||
2201 | ad_rx_machine(lacpdu, port); | 2209 | ad_rx_machine(lacpdu, port); |
2210 | __release_state_machine_lock(port); | ||
2202 | break; | 2211 | break; |
2203 | 2212 | ||
2204 | case AD_TYPE_MARKER: | 2213 | case AD_TYPE_MARKER: |
@@ -2245,7 +2254,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) | |||
2245 | } | 2254 | } |
2246 | 2255 | ||
2247 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; | 2256 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; |
2248 | port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1); | 2257 | port->actor_oper_port_key = port->actor_admin_port_key |= |
2258 | (__get_link_speed(port) << 1); | ||
2249 | pr_debug("Port %d changed speed\n", port->actor_port_number); | 2259 | pr_debug("Port %d changed speed\n", port->actor_port_number); |
2250 | // there is no need to reselect a new aggregator, just signal the | 2260 | // there is no need to reselect a new aggregator, just signal the |
2251 | // state machines to reinitialize | 2261 | // state machines to reinitialize |
@@ -2262,7 +2272,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) | |||
2262 | { | 2272 | { |
2263 | struct port *port; | 2273 | struct port *port; |
2264 | 2274 | ||
2265 | port=&(SLAVE_AD_INFO(slave).port); | 2275 | port = &(SLAVE_AD_INFO(slave).port); |
2266 | 2276 | ||
2267 | // if slave is null, the whole port is not initialized | 2277 | // if slave is null, the whole port is not initialized |
2268 | if (!port->slave) { | 2278 | if (!port->slave) { |
@@ -2272,7 +2282,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) | |||
2272 | } | 2282 | } |
2273 | 2283 | ||
2274 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2284 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
2275 | port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port); | 2285 | port->actor_oper_port_key = port->actor_admin_port_key |= |
2286 | __get_duplex(port); | ||
2276 | pr_debug("Port %d changed duplex\n", port->actor_port_number); | 2287 | pr_debug("Port %d changed duplex\n", port->actor_port_number); |
2277 | // there is no need to reselect a new aggregator, just signal the | 2288 | // there is no need to reselect a new aggregator, just signal the |
2278 | // state machines to reinitialize | 2289 | // state machines to reinitialize |
@@ -2304,14 +2315,17 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) | |||
2304 | if (link == BOND_LINK_UP) { | 2315 | if (link == BOND_LINK_UP) { |
2305 | port->is_enabled = true; | 2316 | port->is_enabled = true; |
2306 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2317 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
2307 | port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port); | 2318 | port->actor_oper_port_key = port->actor_admin_port_key |= |
2319 | __get_duplex(port); | ||
2308 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; | 2320 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; |
2309 | port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1); | 2321 | port->actor_oper_port_key = port->actor_admin_port_key |= |
2322 | (__get_link_speed(port) << 1); | ||
2310 | } else { | 2323 | } else { |
2311 | /* link has failed */ | 2324 | /* link has failed */ |
2312 | port->is_enabled = false; | 2325 | port->is_enabled = false; |
2313 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2326 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
2314 | port->actor_oper_port_key= (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS); | 2327 | port->actor_oper_port_key = (port->actor_admin_port_key &= |
2328 | ~AD_SPEED_KEY_BITS); | ||
2315 | } | 2329 | } |
2316 | //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); | 2330 | //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); |
2317 | // there is no need to reselect a new aggregator, just signal the | 2331 | // there is no need to reselect a new aggregator, just signal the |
@@ -2389,15 +2403,6 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) | |||
2389 | struct ad_info ad_info; | 2403 | struct ad_info ad_info; |
2390 | int res = 1; | 2404 | int res = 1; |
2391 | 2405 | ||
2392 | /* make sure that the slaves list will | ||
2393 | * not change during tx | ||
2394 | */ | ||
2395 | read_lock(&bond->lock); | ||
2396 | |||
2397 | if (!BOND_IS_OK(bond)) { | ||
2398 | goto out; | ||
2399 | } | ||
2400 | |||
2401 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) { | 2406 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) { |
2402 | pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", | 2407 | pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", |
2403 | dev->name); | 2408 | dev->name); |
@@ -2420,9 +2425,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) | |||
2420 | 2425 | ||
2421 | if (agg && (agg->aggregator_identifier == agg_id)) { | 2426 | if (agg && (agg->aggregator_identifier == agg_id)) { |
2422 | slave_agg_no--; | 2427 | slave_agg_no--; |
2423 | if (slave_agg_no < 0) { | 2428 | if (slave_agg_no < 0) |
2424 | break; | 2429 | break; |
2425 | } | ||
2426 | } | 2430 | } |
2427 | } | 2431 | } |
2428 | 2432 | ||
@@ -2438,9 +2442,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) | |||
2438 | int slave_agg_id = 0; | 2442 | int slave_agg_id = 0; |
2439 | struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; | 2443 | struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; |
2440 | 2444 | ||
2441 | if (agg) { | 2445 | if (agg) |
2442 | slave_agg_id = agg->aggregator_identifier; | 2446 | slave_agg_id = agg->aggregator_identifier; |
2443 | } | ||
2444 | 2447 | ||
2445 | if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) { | 2448 | if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) { |
2446 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 2449 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
@@ -2453,36 +2456,20 @@ out: | |||
2453 | /* no suitable interface, frame not sent */ | 2456 | /* no suitable interface, frame not sent */ |
2454 | dev_kfree_skb(skb); | 2457 | dev_kfree_skb(skb); |
2455 | } | 2458 | } |
2456 | read_unlock(&bond->lock); | 2459 | |
2457 | return NETDEV_TX_OK; | 2460 | return NETDEV_TX_OK; |
2458 | } | 2461 | } |
2459 | 2462 | ||
2460 | int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev) | 2463 | void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, |
2464 | struct slave *slave) | ||
2461 | { | 2465 | { |
2462 | struct bonding *bond = netdev_priv(dev); | 2466 | if (skb->protocol != PKT_TYPE_LACPDU) |
2463 | struct slave *slave = NULL; | 2467 | return; |
2464 | int ret = NET_RX_DROP; | ||
2465 | |||
2466 | if (!(dev->flags & IFF_MASTER)) | ||
2467 | goto out; | ||
2468 | 2468 | ||
2469 | if (!pskb_may_pull(skb, sizeof(struct lacpdu))) | 2469 | if (!pskb_may_pull(skb, sizeof(struct lacpdu))) |
2470 | goto out; | 2470 | return; |
2471 | 2471 | ||
2472 | read_lock(&bond->lock); | 2472 | read_lock(&bond->lock); |
2473 | slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), | ||
2474 | orig_dev); | ||
2475 | if (!slave) | ||
2476 | goto out_unlock; | ||
2477 | |||
2478 | bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); | 2473 | bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); |
2479 | |||
2480 | ret = NET_RX_SUCCESS; | ||
2481 | |||
2482 | out_unlock: | ||
2483 | read_unlock(&bond->lock); | 2474 | read_unlock(&bond->lock); |
2484 | out: | ||
2485 | dev_kfree_skb(skb); | ||
2486 | |||
2487 | return ret; | ||
2488 | } | 2475 | } |
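This rewrite, together with the ALB change further down, drops the per-protocol packet_type registration: the handler becomes a void bond->recv_probe callback that the bonding receive path calls for every frame, and each handler checks skb->protocol itself. A compilable user-space model of that single-hook dispatch; all structure and function names here are hypothetical stand-ins, not the kernel's:

#include <stdio.h>
#include <stdint.h>

struct skb { uint16_t protocol; const char *data; };
struct bond;
typedef void (*recv_probe_t)(struct skb *skb, struct bond *bond);
struct bond { recv_probe_t recv_probe; };

#define PROTO_LACPDU 0x8809
#define PROTO_ARP    0x0806

/* 802.3ad probe: ignore anything that is not a LACPDU */
static void lacpdu_recv(struct skb *skb, struct bond *bond)
{
	(void)bond;
	if (skb->protocol != PROTO_LACPDU)
		return;
	printf("LACPDU seen: %s\n", skb->data);
}

/* stands in for the hook the bonding RX path would call */
static void rx_path(struct bond *bond, struct skb *skb)
{
	if (bond->recv_probe)
		bond->recv_probe(skb, bond);    /* peeks, never consumes */
}

int main(void)
{
	struct bond bond = { .recv_probe = lacpdu_recv };
	struct skb lacp = { .protocol = PROTO_LACPDU, .data = "actor info" };
	struct skb arp  = { .protocol = PROTO_ARP,    .data = "who-has"   };

	rx_path(&bond, &lacp);          /* handled */
	rx_path(&bond, &arp);           /* silently ignored */
	return 0;
}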
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 2c46a154f2c6..0ee3f1632c46 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | typedef struct mac_addr { | 40 | typedef struct mac_addr { |
41 | u8 mac_addr_value[ETH_ALEN]; | 41 | u8 mac_addr_value[ETH_ALEN]; |
42 | } mac_addr_t; | 42 | } __packed mac_addr_t; |
43 | 43 | ||
44 | enum { | 44 | enum { |
45 | BOND_AD_STABLE = 0, | 45 | BOND_AD_STABLE = 0, |
@@ -134,12 +134,12 @@ typedef struct lacpdu { | |||
134 | u8 tlv_type_terminator; // = terminator | 134 | u8 tlv_type_terminator; // = terminator |
135 | u8 terminator_length; // = 0 | 135 | u8 terminator_length; // = 0 |
136 | u8 reserved_50[50]; // = 0 | 136 | u8 reserved_50[50]; // = 0 |
137 | } lacpdu_t; | 137 | } __packed lacpdu_t; |
138 | 138 | ||
139 | typedef struct lacpdu_header { | 139 | typedef struct lacpdu_header { |
140 | struct ethhdr hdr; | 140 | struct ethhdr hdr; |
141 | struct lacpdu lacpdu; | 141 | struct lacpdu lacpdu; |
142 | } lacpdu_header_t; | 142 | } __packed lacpdu_header_t; |
143 | 143 | ||
144 | // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) | 144 | // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) |
145 | typedef struct bond_marker { | 145 | typedef struct bond_marker { |
@@ -155,12 +155,12 @@ typedef struct bond_marker { | |||
155 | u8 tlv_type_terminator; // = 0x00 | 155 | u8 tlv_type_terminator; // = 0x00 |
156 | u8 terminator_length; // = 0x00 | 156 | u8 terminator_length; // = 0x00 |
157 | u8 reserved_90[90]; // = 0 | 157 | u8 reserved_90[90]; // = 0 |
158 | } bond_marker_t; | 158 | } __packed bond_marker_t; |
159 | 159 | ||
160 | typedef struct bond_marker_header { | 160 | typedef struct bond_marker_header { |
161 | struct ethhdr hdr; | 161 | struct ethhdr hdr; |
162 | struct bond_marker marker; | 162 | struct bond_marker marker; |
163 | } bond_marker_header_t; | 163 | } __packed bond_marker_header_t; |
164 | 164 | ||
165 | #pragma pack() | 165 | #pragma pack() |
166 | 166 | ||
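Marking these wire-format structures __packed makes the no-padding requirement explicit on each type, in addition to the surrounding #pragma pack() region, so the LACPDU and marker layouts match the 802.3ad frame byte-for-byte. A small stand-alone illustration of what packing changes (struct names invented for the example):

#include <stdio.h>

struct wire_unpacked {
	unsigned char  subtype;
	unsigned short key;     /* compiler may insert a padding byte first */
	unsigned char  state;
};

struct __attribute__((packed)) wire_packed {
	unsigned char  subtype;
	unsigned short key;     /* guaranteed to follow subtype directly */
	unsigned char  state;
};

int main(void)
{
	/* on common ABIs this prints 6 vs 4 */
	printf("unpacked=%zu packed=%zu\n",
	       sizeof(struct wire_unpacked), sizeof(struct wire_packed));
	return 0;
}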
@@ -258,13 +258,13 @@ struct ad_bond_info { | |||
258 | * requested | 258 | * requested |
259 | */ | 259 | */ |
260 | struct timer_list ad_timer; | 260 | struct timer_list ad_timer; |
261 | struct packet_type ad_pkt_type; | ||
262 | }; | 261 | }; |
263 | 262 | ||
264 | struct ad_slave_info { | 263 | struct ad_slave_info { |
265 | struct aggregator aggregator; // 802.3ad aggregator structure | 264 | struct aggregator aggregator; // 802.3ad aggregator structure |
266 | struct port port; // 802.3ad port structure | 265 | struct port port; // 802.3ad port structure |
267 | spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt | 266 | spinlock_t state_machine_lock; /* mutex state machines vs. |
267 | incoming LACPDU */ | ||
268 | u16 id; | 268 | u16 id; |
269 | }; | 269 | }; |
270 | 270 | ||
@@ -279,7 +279,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave); | |||
279 | void bond_3ad_handle_link_change(struct slave *slave, char link); | 279 | void bond_3ad_handle_link_change(struct slave *slave, char link); |
280 | int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); | 280 | int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); |
281 | int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); | 281 | int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); |
282 | int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev); | 282 | void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, |
283 | struct slave *slave); | ||
283 | int bond_3ad_set_carrier(struct bonding *bond); | 284 | int bond_3ad_set_carrier(struct bonding *bond); |
284 | #endif //__BOND_3AD_H__ | 285 | #endif //__BOND_3AD_H__ |
285 | 286 | ||
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 26bb118c4533..2df9276720a0 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -44,42 +44,6 @@ | |||
44 | #include "bond_alb.h" | 44 | #include "bond_alb.h" |
45 | 45 | ||
46 | 46 | ||
47 | #define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */ | ||
48 | #define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing. | ||
49 | * Used for division - never set | ||
50 | * to zero !!! | ||
51 | */ | ||
52 | #define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of | ||
53 | * learning packets to the switch | ||
54 | */ | ||
55 | |||
56 | #define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ | ||
57 | * ALB_TIMER_TICKS_PER_SEC) | ||
58 | |||
59 | #define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \ | ||
60 | * ALB_TIMER_TICKS_PER_SEC) | ||
61 | |||
62 | #define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. | ||
63 | * Note that this value MUST NOT be smaller | ||
64 | * because the key hash table is BYTE wide ! | ||
65 | */ | ||
66 | |||
67 | |||
68 | #define TLB_NULL_INDEX 0xffffffff | ||
69 | #define MAX_LP_BURST 3 | ||
70 | |||
71 | /* rlb defs */ | ||
72 | #define RLB_HASH_TABLE_SIZE 256 | ||
73 | #define RLB_NULL_INDEX 0xffffffff | ||
74 | #define RLB_UPDATE_DELAY 2*ALB_TIMER_TICKS_PER_SEC /* 2 seconds */ | ||
75 | #define RLB_ARP_BURST_SIZE 2 | ||
76 | #define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb | ||
77 | * rebalance interval (5 min). | ||
78 | */ | ||
79 | /* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is | ||
80 | * promiscuous after failover | ||
81 | */ | ||
82 | #define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC | ||
83 | 47 | ||
84 | #ifndef __long_aligned | 48 | #ifndef __long_aligned |
85 | #define __long_aligned __attribute__((aligned((sizeof(long))))) | 49 | #define __long_aligned __attribute__((aligned((sizeof(long))))) |
@@ -199,8 +163,6 @@ static int tlb_initialize(struct bonding *bond) | |||
199 | struct tlb_client_info *new_hashtbl; | 163 | struct tlb_client_info *new_hashtbl; |
200 | int i; | 164 | int i; |
201 | 165 | ||
202 | spin_lock_init(&(bond_info->tx_hashtbl_lock)); | ||
203 | |||
204 | new_hashtbl = kzalloc(size, GFP_KERNEL); | 166 | new_hashtbl = kzalloc(size, GFP_KERNEL); |
205 | if (!new_hashtbl) { | 167 | if (!new_hashtbl) { |
206 | pr_err("%s: Error: Failed to allocate TLB hash table\n", | 168 | pr_err("%s: Error: Failed to allocate TLB hash table\n", |
@@ -212,7 +174,7 @@ static int tlb_initialize(struct bonding *bond) | |||
212 | bond_info->tx_hashtbl = new_hashtbl; | 174 | bond_info->tx_hashtbl = new_hashtbl; |
213 | 175 | ||
214 | for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { | 176 | for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { |
215 | tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); | 177 | tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); |
216 | } | 178 | } |
217 | 179 | ||
218 | _unlock_tx_hashtbl(bond); | 180 | _unlock_tx_hashtbl(bond); |
@@ -344,45 +306,33 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) | |||
344 | _unlock_rx_hashtbl(bond); | 306 | _unlock_rx_hashtbl(bond); |
345 | } | 307 | } |
346 | 308 | ||
347 | static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev) | 309 | static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond, |
310 | struct slave *slave) | ||
348 | { | 311 | { |
349 | struct bonding *bond; | 312 | struct arp_pkt *arp; |
350 | struct arp_pkt *arp = (struct arp_pkt *)skb->data; | ||
351 | int res = NET_RX_DROP; | ||
352 | |||
353 | while (bond_dev->priv_flags & IFF_802_1Q_VLAN) | ||
354 | bond_dev = vlan_dev_real_dev(bond_dev); | ||
355 | 313 | ||
356 | if (!(bond_dev->priv_flags & IFF_BONDING) || | 314 | if (skb->protocol != cpu_to_be16(ETH_P_ARP)) |
357 | !(bond_dev->flags & IFF_MASTER)) | 315 | return; |
358 | goto out; | ||
359 | 316 | ||
317 | arp = (struct arp_pkt *) skb->data; | ||
360 | if (!arp) { | 318 | if (!arp) { |
361 | pr_debug("Packet has no ARP data\n"); | 319 | pr_debug("Packet has no ARP data\n"); |
362 | goto out; | 320 | return; |
363 | } | 321 | } |
364 | 322 | ||
365 | if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) | 323 | if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) |
366 | goto out; | 324 | return; |
367 | 325 | ||
368 | if (skb->len < sizeof(struct arp_pkt)) { | 326 | if (skb->len < sizeof(struct arp_pkt)) { |
369 | pr_debug("Packet is too small to be an ARP\n"); | 327 | pr_debug("Packet is too small to be an ARP\n"); |
370 | goto out; | 328 | return; |
371 | } | 329 | } |
372 | 330 | ||
373 | if (arp->op_code == htons(ARPOP_REPLY)) { | 331 | if (arp->op_code == htons(ARPOP_REPLY)) { |
374 | /* update rx hash table for this ARP */ | 332 | /* update rx hash table for this ARP */ |
375 | bond = netdev_priv(bond_dev); | ||
376 | rlb_update_entry_from_arp(bond, arp); | 333 | rlb_update_entry_from_arp(bond, arp); |
377 | pr_debug("Server received an ARP Reply from client\n"); | 334 | pr_debug("Server received an ARP Reply from client\n"); |
378 | } | 335 | } |
379 | |||
380 | res = NET_RX_SUCCESS; | ||
381 | |||
382 | out: | ||
383 | dev_kfree_skb(skb); | ||
384 | |||
385 | return res; | ||
386 | } | 336 | } |
387 | 337 | ||
388 | /* Caller must hold bond lock for read */ | 338 | /* Caller must hold bond lock for read */ |
@@ -636,7 +586,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon | |||
636 | 586 | ||
637 | _lock_rx_hashtbl(bond); | 587 | _lock_rx_hashtbl(bond); |
638 | 588 | ||
639 | hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src)); | 589 | hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst)); |
640 | client_info = &(bond_info->rx_hashtbl[hash_index]); | 590 | client_info = &(bond_info->rx_hashtbl[hash_index]); |
641 | 591 | ||
642 | if (client_info->assigned) { | 592 | if (client_info->assigned) { |
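The sizeof() change above is harmless either way -- ip_src and ip_dst are the same size -- but it makes the hashed field and the hashed length refer to the same member. A tiny model of hashing the destination IP bytes; the XOR hash here is an assumption about what _simple_hash() does, not a quote of it:

#include <stdio.h>
#include <stdint.h>

/* byte-XOR hash, modelled on _simple_hash() */
static uint8_t simple_hash(const uint8_t *p, int len)
{
	uint8_t h = 0;
	int i;

	for (i = 0; i < len; i++)
		h ^= p[i];
	return h;
}

int main(void)
{
	uint8_t ip_dst[4] = { 192, 168, 1, 42 };

	/* hash input and length now come from the same field */
	printf("rx hash index = %u\n",
	       simple_hash(ip_dst, (int)sizeof(ip_dst)));
	return 0;
}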
@@ -733,7 +683,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) | |||
733 | */ | 683 | */ |
734 | rlb_choose_channel(skb, bond); | 684 | rlb_choose_channel(skb, bond); |
735 | 685 | ||
736 | /* The ARP relpy packets must be delayed so that | 686 | /* The ARP reply packets must be delayed so that |
737 | * they can cancel out the influence of the ARP request. | 687 | * they can cancel out the influence of the ARP request. |
738 | */ | 688 | */ |
739 | bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; | 689 | bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; |
@@ -791,13 +741,10 @@ static void rlb_init_table_entry(struct rlb_client_info *entry) | |||
791 | static int rlb_initialize(struct bonding *bond) | 741 | static int rlb_initialize(struct bonding *bond) |
792 | { | 742 | { |
793 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 743 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
794 | struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type); | ||
795 | struct rlb_client_info *new_hashtbl; | 744 | struct rlb_client_info *new_hashtbl; |
796 | int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); | 745 | int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); |
797 | int i; | 746 | int i; |
798 | 747 | ||
799 | spin_lock_init(&(bond_info->rx_hashtbl_lock)); | ||
800 | |||
801 | new_hashtbl = kmalloc(size, GFP_KERNEL); | 748 | new_hashtbl = kmalloc(size, GFP_KERNEL); |
802 | if (!new_hashtbl) { | 749 | if (!new_hashtbl) { |
803 | pr_err("%s: Error: Failed to allocate RLB hash table\n", | 750 | pr_err("%s: Error: Failed to allocate RLB hash table\n", |
@@ -816,13 +763,8 @@ static int rlb_initialize(struct bonding *bond) | |||
816 | 763 | ||
817 | _unlock_rx_hashtbl(bond); | 764 | _unlock_rx_hashtbl(bond); |
818 | 765 | ||
819 | /*initialize packet type*/ | ||
820 | pk_type->type = cpu_to_be16(ETH_P_ARP); | ||
821 | pk_type->dev = bond->dev; | ||
822 | pk_type->func = rlb_arp_recv; | ||
823 | |||
824 | /* register to receive ARPs */ | 766 | /* register to receive ARPs */ |
825 | dev_add_pack(pk_type); | 767 | bond->recv_probe = rlb_arp_recv; |
826 | 768 | ||
827 | return 0; | 769 | return 0; |
828 | } | 770 | } |
@@ -831,8 +773,6 @@ static void rlb_deinitialize(struct bonding *bond) | |||
831 | { | 773 | { |
832 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 774 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
833 | 775 | ||
834 | dev_remove_pack(&(bond_info->rlb_pkt_type)); | ||
835 | |||
836 | _lock_rx_hashtbl(bond); | 776 | _lock_rx_hashtbl(bond); |
837 | 777 | ||
838 | kfree(bond_info->rx_hashtbl); | 778 | kfree(bond_info->rx_hashtbl); |
@@ -1074,7 +1014,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla | |||
1074 | * | 1014 | * |
1075 | * If the permanent hw address of @slave is @bond's hw address, we need to | 1015 | * If the permanent hw address of @slave is @bond's hw address, we need to |
1076 | * find a different hw address to give @slave, that isn't in use by any other | 1016 | * find a different hw address to give @slave, that isn't in use by any other |
1077 | * slave in the bond. This address must be, of course, one of the premanent | 1017 | * slave in the bond. This address must be, of course, one of the permanent |
1078 | * addresses of the other slaves. | 1018 | * addresses of the other slaves. |
1079 | * | 1019 | * |
1080 | * We go over the slave list, and for each slave there we compare its | 1020 | * We go over the slave list, and for each slave there we compare its |
@@ -1281,16 +1221,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1281 | skb_reset_mac_header(skb); | 1221 | skb_reset_mac_header(skb); |
1282 | eth_data = eth_hdr(skb); | 1222 | eth_data = eth_hdr(skb); |
1283 | 1223 | ||
1284 | /* make sure that the curr_active_slave and the slaves list do | 1224 | /* make sure that the curr_active_slave do not change during tx |
1285 | * not change during tx | ||
1286 | */ | 1225 | */ |
1287 | read_lock(&bond->lock); | ||
1288 | read_lock(&bond->curr_slave_lock); | 1226 | read_lock(&bond->curr_slave_lock); |
1289 | 1227 | ||
1290 | if (!BOND_IS_OK(bond)) { | ||
1291 | goto out; | ||
1292 | } | ||
1293 | |||
1294 | switch (ntohs(skb->protocol)) { | 1228 | switch (ntohs(skb->protocol)) { |
1295 | case ETH_P_IP: { | 1229 | case ETH_P_IP: { |
1296 | const struct iphdr *iph = ip_hdr(skb); | 1230 | const struct iphdr *iph = ip_hdr(skb); |
@@ -1390,13 +1324,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1390 | } | 1324 | } |
1391 | } | 1325 | } |
1392 | 1326 | ||
1393 | out: | ||
1394 | if (res) { | 1327 | if (res) { |
1395 | /* no suitable interface, frame not sent */ | 1328 | /* no suitable interface, frame not sent */ |
1396 | dev_kfree_skb(skb); | 1329 | dev_kfree_skb(skb); |
1397 | } | 1330 | } |
1398 | read_unlock(&bond->curr_slave_lock); | 1331 | read_unlock(&bond->curr_slave_lock); |
1399 | read_unlock(&bond->lock); | 1332 | |
1400 | return NETDEV_TX_OK; | 1333 | return NETDEV_TX_OK; |
1401 | } | 1334 | } |
1402 | 1335 | ||
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index 50968f8196cf..90f140a2d197 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h | |||
@@ -31,14 +31,52 @@ struct slave; | |||
31 | #define BOND_ALB_INFO(bond) ((bond)->alb_info) | 31 | #define BOND_ALB_INFO(bond) ((bond)->alb_info) |
32 | #define SLAVE_TLB_INFO(slave) ((slave)->tlb_info) | 32 | #define SLAVE_TLB_INFO(slave) ((slave)->tlb_info) |
33 | 33 | ||
34 | #define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */ | ||
35 | #define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing. | ||
36 | * Used for division - never set | ||
37 | * to zero !!! | ||
38 | */ | ||
39 | #define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of | ||
40 | * learning packets to the switch | ||
41 | */ | ||
42 | |||
43 | #define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ | ||
44 | * ALB_TIMER_TICKS_PER_SEC) | ||
45 | |||
46 | #define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \ | ||
47 | * ALB_TIMER_TICKS_PER_SEC) | ||
48 | |||
49 | #define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. | ||
50 | * Note that this value MUST NOT be smaller | ||
51 | * because the key hash table is BYTE wide ! | ||
52 | */ | ||
53 | |||
54 | |||
55 | #define TLB_NULL_INDEX 0xffffffff | ||
56 | #define MAX_LP_BURST 3 | ||
57 | |||
58 | /* rlb defs */ | ||
59 | #define RLB_HASH_TABLE_SIZE 256 | ||
60 | #define RLB_NULL_INDEX 0xffffffff | ||
61 | #define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */ | ||
62 | #define RLB_ARP_BURST_SIZE 2 | ||
63 | #define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb | ||
64 | * rebalance interval (5 min). | ||
65 | */ | ||
66 | /* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is | ||
67 | * promiscuous after failover | ||
68 | */ | ||
69 | #define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC) | ||
70 | |||
71 | |||
34 | struct tlb_client_info { | 72 | struct tlb_client_info { |
35 | struct slave *tx_slave; /* A pointer to slave used for transmiting | 73 | struct slave *tx_slave; /* A pointer to slave used for transmiting |
36 | * packets to a Client that the Hash function | 74 | * packets to a Client that the Hash function |
37 | * gave this entry index. | 75 | * gave this entry index. |
38 | */ | 76 | */ |
39 | u32 tx_bytes; /* Each Client acumulates the BytesTx that | 77 | u32 tx_bytes; /* Each Client accumulates the BytesTx that |
40 | * were tranmitted to it, and after each | 78 | * were transmitted to it, and after each |
41 | * CallBack the LoadHistory is devided | 79 | * CallBack the LoadHistory is divided |
42 | * by the balance interval | 80 | * by the balance interval |
43 | */ | 81 | */ |
44 | u32 load_history; /* This field contains the amount of Bytes | 82 | u32 load_history; /* This field contains the amount of Bytes |
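Note that the RLB interval macros above (RLB_UPDATE_DELAY and RLB_PROMISC_TIMEOUT) gained parentheses around their products as they moved into this header. Without them, any use next to a higher-precedence operator expands wrongly; a quick demonstration with assumed values:

#include <stdio.h>

#define TICKS_PER_SEC  10
#define TIMEOUT_BAD    10*TICKS_PER_SEC     /* old, unparenthesized form */
#define TIMEOUT_GOOD   (10*TICKS_PER_SEC)   /* form used in this header */

int main(void)
{
	int counter = 250;

	/* '%' binds tighter than '*', so the first expands to
	 * (counter % 10) * TICKS_PER_SEC instead of counter % 100 */
	printf("bad=%d good=%d\n",
	       counter % TIMEOUT_BAD, counter % TIMEOUT_GOOD);
	return 0;
}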
@@ -84,7 +122,6 @@ struct tlb_slave_info { | |||
84 | }; | 122 | }; |
85 | 123 | ||
86 | struct alb_bond_info { | 124 | struct alb_bond_info { |
87 | struct timer_list alb_timer; | ||
88 | struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ | 125 | struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ |
89 | spinlock_t tx_hashtbl_lock; | 126 | spinlock_t tx_hashtbl_lock; |
90 | u32 unbalanced_load; | 127 | u32 unbalanced_load; |
@@ -92,7 +129,6 @@ struct alb_bond_info { | |||
92 | int lp_counter; | 129 | int lp_counter; |
93 | /* -------- rlb parameters -------- */ | 130 | /* -------- rlb parameters -------- */ |
94 | int rlb_enabled; | 131 | int rlb_enabled; |
95 | struct packet_type rlb_pkt_type; | ||
96 | struct rlb_client_info *rx_hashtbl; /* Receive hash table */ | 132 | struct rlb_client_info *rx_hashtbl; /* Receive hash table */ |
97 | spinlock_t rx_hashtbl_lock; | 133 | spinlock_t rx_hashtbl_lock; |
98 | u32 rx_hashtbl_head; | 134 | u32 rx_hashtbl_head; |
@@ -102,7 +138,6 @@ struct alb_bond_info { | |||
102 | struct slave *next_rx_slave;/* next slave to be assigned | 138 | struct slave *next_rx_slave;/* next slave to be assigned |
103 | * to a new rx client for | 139 | * to a new rx client for |
104 | */ | 140 | */ |
105 | u32 rlb_interval_counter; | ||
106 | u8 primary_is_promisc; /* boolean */ | 141 | u8 primary_is_promisc; /* boolean */ |
107 | u32 rlb_promisc_timeout_counter;/* counts primary | 142 | u32 rlb_promisc_timeout_counter;/* counts primary |
108 | * promiscuity time | 143 | * promiscuity time |
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c new file mode 100644 index 000000000000..3680aa251dea --- /dev/null +++ b/drivers/net/bonding/bond_debugfs.c | |||
@@ -0,0 +1,146 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/device.h> | ||
4 | #include <linux/netdevice.h> | ||
5 | |||
6 | #include "bonding.h" | ||
7 | #include "bond_alb.h" | ||
8 | |||
9 | #ifdef CONFIG_DEBUG_FS | ||
10 | |||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | |||
14 | static struct dentry *bonding_debug_root; | ||
15 | |||
16 | /* | ||
17 | * Show RLB hash table | ||
18 | */ | ||
19 | static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) | ||
20 | { | ||
21 | struct bonding *bond = m->private; | ||
22 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | ||
23 | struct rlb_client_info *client_info; | ||
24 | u32 hash_index; | ||
25 | |||
26 | if (bond->params.mode != BOND_MODE_ALB) | ||
27 | return 0; | ||
28 | |||
29 | seq_printf(m, "SourceIP DestinationIP " | ||
30 | "Destination MAC DEV\n"); | ||
31 | |||
32 | spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); | ||
33 | |||
34 | hash_index = bond_info->rx_hashtbl_head; | ||
35 | for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { | ||
36 | client_info = &(bond_info->rx_hashtbl[hash_index]); | ||
37 | seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", | ||
38 | &client_info->ip_src, | ||
39 | &client_info->ip_dst, | ||
40 | &client_info->mac_dst, | ||
41 | client_info->slave->dev->name); | ||
42 | } | ||
43 | |||
44 | spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file) | ||
50 | { | ||
51 | return single_open(file, bond_debug_rlb_hash_show, inode->i_private); | ||
52 | } | ||
53 | |||
54 | static const struct file_operations bond_debug_rlb_hash_fops = { | ||
55 | .owner = THIS_MODULE, | ||
56 | .open = bond_debug_rlb_hash_open, | ||
57 | .read = seq_read, | ||
58 | .llseek = seq_lseek, | ||
59 | .release = single_release, | ||
60 | }; | ||
61 | |||
62 | void bond_debug_register(struct bonding *bond) | ||
63 | { | ||
64 | if (!bonding_debug_root) | ||
65 | return; | ||
66 | |||
67 | bond->debug_dir = | ||
68 | debugfs_create_dir(bond->dev->name, bonding_debug_root); | ||
69 | |||
70 | if (!bond->debug_dir) { | ||
71 | pr_warning("%s: Warning: failed to register to debugfs\n", | ||
72 | bond->dev->name); | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir, | ||
77 | bond, &bond_debug_rlb_hash_fops); | ||
78 | } | ||
79 | |||
80 | void bond_debug_unregister(struct bonding *bond) | ||
81 | { | ||
82 | if (!bonding_debug_root) | ||
83 | return; | ||
84 | |||
85 | debugfs_remove_recursive(bond->debug_dir); | ||
86 | } | ||
87 | |||
88 | void bond_debug_reregister(struct bonding *bond) | ||
89 | { | ||
90 | struct dentry *d; | ||
91 | |||
92 | if (!bonding_debug_root) | ||
93 | return; | ||
94 | |||
95 | d = debugfs_rename(bonding_debug_root, bond->debug_dir, | ||
96 | bonding_debug_root, bond->dev->name); | ||
97 | if (d) { | ||
98 | bond->debug_dir = d; | ||
99 | } else { | ||
100 | pr_warning("%s: Warning: failed to reregister, " | ||
101 | "so just unregister old one\n", | ||
102 | bond->dev->name); | ||
103 | bond_debug_unregister(bond); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | void bond_create_debugfs(void) | ||
108 | { | ||
109 | bonding_debug_root = debugfs_create_dir("bonding", NULL); | ||
110 | |||
111 | if (!bonding_debug_root) { | ||
112 | pr_warning("Warning: Cannot create bonding directory" | ||
113 | " in debugfs\n"); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | void bond_destroy_debugfs(void) | ||
118 | { | ||
119 | debugfs_remove_recursive(bonding_debug_root); | ||
120 | bonding_debug_root = NULL; | ||
121 | } | ||
122 | |||
123 | |||
124 | #else /* !CONFIG_DEBUG_FS */ | ||
125 | |||
126 | void bond_debug_register(struct bonding *bond) | ||
127 | { | ||
128 | } | ||
129 | |||
130 | void bond_debug_unregister(struct bonding *bond) | ||
131 | { | ||
132 | } | ||
133 | |||
134 | void bond_debug_reregister(struct bonding *bond) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | void bond_create_debugfs(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | void bond_destroy_debugfs(void) | ||
143 | { | ||
144 | } | ||
145 | |||
146 | #endif /* CONFIG_DEBUG_FS */ | ||
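bond_debug_rlb_hash_show() above walks the RLB table through index links rather than pointers: rx_hashtbl_head names the first slot, each entry's next field names the following one, and RLB_NULL_INDEX terminates the chain. A self-contained sketch of that traversal, with the entry reduced to the fields the walk needs:

#include <stdio.h>
#include <stdint.h>

#define RLB_NULL_INDEX 0xffffffff

struct client_info {
	uint32_t ip_dst;        /* payload, stands in for the real fields */
	uint32_t next;          /* index of the next used slot */
};

int main(void)
{
	/* three used slots chained 2 -> 0 -> 5 */
	struct client_info tbl[8] = {
		[2] = { .ip_dst = 0x0a000001, .next = 0 },
		[0] = { .ip_dst = 0x0a000002, .next = 5 },
		[5] = { .ip_dst = 0x0a000003, .next = RLB_NULL_INDEX },
	};
	uint32_t head = 2;
	uint32_t i;

	for (i = head; i != RLB_NULL_INDEX; i = tbl[i].next)
		printf("slot %u -> dst 0x%08x\n", i, tbl[i].ip_dst);
	return 0;
}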
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c index 121b073a6c3f..84fbd4ebd778 100644 --- a/drivers/net/bonding/bond_ipv6.c +++ b/drivers/net/bonding/bond_ipv6.c | |||
@@ -88,7 +88,12 @@ static void bond_na_send(struct net_device *slave_dev, | |||
88 | } | 88 | } |
89 | 89 | ||
90 | if (vlan_id) { | 90 | if (vlan_id) { |
91 | skb = vlan_put_tag(skb, vlan_id); | 91 | /* The Ethernet header is not present yet, so it is |
92 | * too early to insert a VLAN tag. Force use of an | ||
93 | * out-of-line tag here and let dev_hard_start_xmit() | ||
94 | * insert it if the slave hardware can't. | ||
95 | */ | ||
96 | skb = __vlan_hwaccel_put_tag(skb, vlan_id); | ||
92 | if (!skb) { | 97 | if (!skb) { |
93 | pr_err("failed to insert VLAN tag\n"); | 98 | pr_err("failed to insert VLAN tag\n"); |
94 | return; | 99 | return; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e953c6ad6e6d..63c22b0bb5ad 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -59,15 +59,12 @@ | |||
59 | #include <linux/uaccess.h> | 59 | #include <linux/uaccess.h> |
60 | #include <linux/errno.h> | 60 | #include <linux/errno.h> |
61 | #include <linux/netdevice.h> | 61 | #include <linux/netdevice.h> |
62 | #include <linux/netpoll.h> | ||
63 | #include <linux/inetdevice.h> | 62 | #include <linux/inetdevice.h> |
64 | #include <linux/igmp.h> | 63 | #include <linux/igmp.h> |
65 | #include <linux/etherdevice.h> | 64 | #include <linux/etherdevice.h> |
66 | #include <linux/skbuff.h> | 65 | #include <linux/skbuff.h> |
67 | #include <net/sock.h> | 66 | #include <net/sock.h> |
68 | #include <linux/rtnetlink.h> | 67 | #include <linux/rtnetlink.h> |
69 | #include <linux/proc_fs.h> | ||
70 | #include <linux/seq_file.h> | ||
71 | #include <linux/smp.h> | 68 | #include <linux/smp.h> |
72 | #include <linux/if_ether.h> | 69 | #include <linux/if_ether.h> |
73 | #include <net/arp.h> | 70 | #include <net/arp.h> |
@@ -76,6 +73,7 @@ | |||
76 | #include <linux/if_vlan.h> | 73 | #include <linux/if_vlan.h> |
77 | #include <linux/if_bonding.h> | 74 | #include <linux/if_bonding.h> |
78 | #include <linux/jiffies.h> | 75 | #include <linux/jiffies.h> |
76 | #include <linux/preempt.h> | ||
79 | #include <net/route.h> | 77 | #include <net/route.h> |
80 | #include <net/net_namespace.h> | 78 | #include <net/net_namespace.h> |
81 | #include <net/netns/generic.h> | 79 | #include <net/netns/generic.h> |
@@ -91,8 +89,7 @@ | |||
91 | 89 | ||
92 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; | 90 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; |
93 | static int tx_queues = BOND_DEFAULT_TX_QUEUES; | 91 | static int tx_queues = BOND_DEFAULT_TX_QUEUES; |
94 | static int num_grat_arp = 1; | 92 | static int num_peer_notif = 1; |
95 | static int num_unsol_na = 1; | ||
96 | static int miimon = BOND_LINK_MON_INTERV; | 93 | static int miimon = BOND_LINK_MON_INTERV; |
97 | static int updelay; | 94 | static int updelay; |
98 | static int downdelay; | 95 | static int downdelay; |
@@ -109,15 +106,18 @@ static char *arp_validate; | |||
109 | static char *fail_over_mac; | 106 | static char *fail_over_mac; |
110 | static int all_slaves_active = 0; | 107 | static int all_slaves_active = 0; |
111 | static struct bond_params bonding_defaults; | 108 | static struct bond_params bonding_defaults; |
109 | static int resend_igmp = BOND_DEFAULT_RESEND_IGMP; | ||
112 | 110 | ||
113 | module_param(max_bonds, int, 0); | 111 | module_param(max_bonds, int, 0); |
114 | MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); | 112 | MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); |
115 | module_param(tx_queues, int, 0); | 113 | module_param(tx_queues, int, 0); |
116 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); | 114 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); |
117 | module_param(num_grat_arp, int, 0644); | 115 | module_param_named(num_grat_arp, num_peer_notif, int, 0644); |
118 | MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); | 116 | MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on " |
119 | module_param(num_unsol_na, int, 0644); | 117 | "failover event (alias of num_unsol_na)"); |
120 | MODULE_PARM_DESC(num_unsol_na, "Number of unsolicited IPv6 Neighbor Advertisements packets to send on failover event"); | 118 | module_param_named(num_unsol_na, num_peer_notif, int, 0644); |
119 | MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on " | ||
120 | "failover event (alias of num_grat_arp)"); | ||
121 | module_param(miimon, int, 0); | 121 | module_param(miimon, int, 0); |
122 | MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); | 122 | MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); |
123 | module_param(updelay, int, 0); | 123 | module_param(updelay, int, 0); |
@@ -129,7 +129,7 @@ module_param(use_carrier, int, 0); | |||
129 | MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " | 129 | MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " |
130 | "0 for off, 1 for on (default)"); | 130 | "0 for off, 1 for on (default)"); |
131 | module_param(mode, charp, 0); | 131 | module_param(mode, charp, 0); |
132 | MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, " | 132 | MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " |
133 | "1 for active-backup, 2 for balance-xor, " | 133 | "1 for active-backup, 2 for balance-xor, " |
134 | "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " | 134 | "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " |
135 | "6 for balance-alb"); | 135 | "6 for balance-alb"); |
@@ -144,30 +144,41 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave " | |||
144 | "2 for only on active slave " | 144 | "2 for only on active slave " |
145 | "failure"); | 145 | "failure"); |
146 | module_param(lacp_rate, charp, 0); | 146 | module_param(lacp_rate, charp, 0); |
147 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " | 147 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " |
148 | "(slow/fast)"); | 148 | "0 for slow, 1 for fast"); |
149 | module_param(ad_select, charp, 0); | 149 | module_param(ad_select, charp, 0); |
150 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)"); | 150 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " |
151 | "0 for stable (default), 1 for bandwidth, " | ||
152 | "2 for count"); | ||
151 | module_param(xmit_hash_policy, charp, 0); | 153 | module_param(xmit_hash_policy, charp, 0); |
152 | MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)" | 154 | MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " |
153 | ", 1 for layer 3+4"); | 155 | "0 for layer 2 (default), 1 for layer 3+4, " |
156 | "2 for layer 2+3"); | ||
154 | module_param(arp_interval, int, 0); | 157 | module_param(arp_interval, int, 0); |
155 | MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); | 158 | MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); |
156 | module_param_array(arp_ip_target, charp, NULL, 0); | 159 | module_param_array(arp_ip_target, charp, NULL, 0); |
157 | MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); | 160 | MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); |
158 | module_param(arp_validate, charp, 0); | 161 | module_param(arp_validate, charp, 0); |
159 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); | 162 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; " |
163 | "0 for none (default), 1 for active, " | ||
164 | "2 for backup, 3 for all"); | ||
160 | module_param(fail_over_mac, charp, 0); | 165 | module_param(fail_over_mac, charp, 0); |
161 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); | 166 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " |
167 | "the same MAC; 0 for none (default), " | ||
168 | "1 for active, 2 for follow"); | ||
162 | module_param(all_slaves_active, int, 0); | 169 | module_param(all_slaves_active, int, 0); |
163 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" | 170 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" |
164 | "by setting active flag for all slaves. " | 171 | "by setting active flag for all slaves; " |
165 | "0 for never (default), 1 for always."); | 172 | "0 for never (default), 1 for always."); |
173 | module_param(resend_igmp, int, 0); | ||
174 | MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on " | ||
175 | "link failure"); | ||
166 | 176 | ||
167 | /*----------------------------- Global variables ----------------------------*/ | 177 | /*----------------------------- Global variables ----------------------------*/ |
168 | 178 | ||
169 | static const char * const version = | 179 | #ifdef CONFIG_NET_POLL_CONTROLLER |
170 | DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; | 180 | atomic_t netpoll_block_tx = ATOMIC_INIT(0); |
181 | #endif | ||
171 | 182 | ||
172 | int bond_net_id __read_mostly; | 183 | int bond_net_id __read_mostly; |
173 | 184 | ||
@@ -176,9 +187,6 @@ static int arp_ip_count; | |||
176 | static int bond_mode = BOND_MODE_ROUNDROBIN; | 187 | static int bond_mode = BOND_MODE_ROUNDROBIN; |
177 | static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; | 188 | static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; |
178 | static int lacp_fast; | 189 | static int lacp_fast; |
179 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
180 | static int disable_netpoll = 1; | ||
181 | #endif | ||
182 | 190 | ||
183 | const struct bond_parm_tbl bond_lacp_tbl[] = { | 191 | const struct bond_parm_tbl bond_lacp_tbl[] = { |
184 | { "slow", AD_LACP_SLOW}, | 192 | { "slow", AD_LACP_SLOW}, |
@@ -235,13 +243,12 @@ struct bond_parm_tbl ad_select_tbl[] = { | |||
235 | 243 | ||
236 | /*-------------------------- Forward declarations ---------------------------*/ | 244 | /*-------------------------- Forward declarations ---------------------------*/ |
237 | 245 | ||
238 | static void bond_send_gratuitous_arp(struct bonding *bond); | ||
239 | static int bond_init(struct net_device *bond_dev); | 246 | static int bond_init(struct net_device *bond_dev); |
240 | static void bond_uninit(struct net_device *bond_dev); | 247 | static void bond_uninit(struct net_device *bond_dev); |
241 | 248 | ||
242 | /*---------------------------- General routines -----------------------------*/ | 249 | /*---------------------------- General routines -----------------------------*/ |
243 | 250 | ||
244 | static const char *bond_mode_name(int mode) | 251 | const char *bond_mode_name(int mode) |
245 | { | 252 | { |
246 | static const char *names[] = { | 253 | static const char *names[] = { |
247 | [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", | 254 | [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", |
@@ -307,6 +314,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) | |||
307 | 314 | ||
308 | pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); | 315 | pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); |
309 | 316 | ||
317 | block_netpoll_tx(); | ||
310 | write_lock_bh(&bond->lock); | 318 | write_lock_bh(&bond->lock); |
311 | 319 | ||
312 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | 320 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { |
@@ -341,36 +349,11 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) | |||
341 | 349 | ||
342 | out: | 350 | out: |
343 | write_unlock_bh(&bond->lock); | 351 | write_unlock_bh(&bond->lock); |
352 | unblock_netpoll_tx(); | ||
344 | return res; | 353 | return res; |
345 | } | 354 | } |
346 | 355 | ||
347 | /** | 356 | /** |
348 | * bond_has_challenged_slaves | ||
349 | * @bond: the bond we're working on | ||
350 | * | ||
351 | * Searches the slave list. Returns 1 if a vlan challenged slave | ||
352 | * was found, 0 otherwise. | ||
353 | * | ||
354 | * Assumes bond->lock is held. | ||
355 | */ | ||
356 | static int bond_has_challenged_slaves(struct bonding *bond) | ||
357 | { | ||
358 | struct slave *slave; | ||
359 | int i; | ||
360 | |||
361 | bond_for_each_slave(bond, slave, i) { | ||
362 | if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) { | ||
363 | pr_debug("found VLAN challenged slave - %s\n", | ||
364 | slave->dev->name); | ||
365 | return 1; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | pr_debug("no VLAN challenged slaves found\n"); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * bond_next_vlan - safely skip to the next item in the vlans list. | 357 | * bond_next_vlan - safely skip to the next item in the vlans list. |
375 | * @bond: the bond we're working on | 358 | * @bond: the bond we're working on |
376 | * @curr: item we're advancing from | 359 | * @curr: item we're advancing from |
@@ -405,54 +388,26 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) | |||
405 | return next; | 388 | return next; |
406 | } | 389 | } |
407 | 390 | ||
391 | #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) | ||
392 | |||
408 | /** | 393 | /** |
409 | * bond_dev_queue_xmit - Prepare skb for xmit. | 394 | * bond_dev_queue_xmit - Prepare skb for xmit. |
410 | * | 395 | * |
411 | * @bond: bond device that got this skb for tx. | 396 | * @bond: bond device that got this skb for tx. |
412 | * @skb: hw accel VLAN tagged skb to transmit | 397 | * @skb: hw accel VLAN tagged skb to transmit |
413 | * @slave_dev: slave that is supposed to xmit this skbuff | 398 | * @slave_dev: slave that is supposed to xmit this skbuff |
414 | * | ||
415 | * When the bond gets an skb to transmit that is | ||
416 | * already hardware accelerated VLAN tagged, and it | ||
417 | * needs to relay this skb to a slave that is not | ||
418 | * hw accel capable, the skb needs to be "unaccelerated", | ||
419 | * i.e. strip the hwaccel tag and re-insert it as part | ||
420 | * of the payload. | ||
421 | */ | 399 | */ |
422 | int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, | 400 | int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, |
423 | struct net_device *slave_dev) | 401 | struct net_device *slave_dev) |
424 | { | 402 | { |
425 | unsigned short uninitialized_var(vlan_id); | 403 | skb->dev = slave_dev; |
404 | skb->priority = 1; | ||
426 | 405 | ||
427 | /* Test vlan_list not vlgrp to catch and handle 802.1p tags */ | 406 | skb->queue_mapping = bond_queue_mapping(skb); |
428 | if (!list_empty(&bond->vlan_list) && | ||
429 | !(slave_dev->features & NETIF_F_HW_VLAN_TX) && | ||
430 | vlan_get_tag(skb, &vlan_id) == 0) { | ||
431 | skb->dev = slave_dev; | ||
432 | skb = vlan_put_tag(skb, vlan_id); | ||
433 | if (!skb) { | ||
434 | /* vlan_put_tag() frees the skb in case of error, | ||
435 | * so return success here so the calling functions | ||
436 | * won't attempt to free is again. | ||
437 | */ | ||
438 | return 0; | ||
439 | } | ||
440 | } else { | ||
441 | skb->dev = slave_dev; | ||
442 | } | ||
443 | 407 | ||
444 | skb->priority = 1; | 408 | if (unlikely(netpoll_tx_running(slave_dev))) |
445 | #ifdef CONFIG_NET_POLL_CONTROLLER | 409 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); |
446 | if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { | 410 | else |
447 | struct netpoll *np = bond->dev->npinfo->netpoll; | ||
448 | slave_dev->npinfo = bond->dev->npinfo; | ||
449 | np->real_dev = np->dev = skb->dev; | ||
450 | slave_dev->priv_flags |= IFF_IN_NETPOLL; | ||
451 | netpoll_send_skb(np, skb); | ||
452 | slave_dev->priv_flags &= ~IFF_IN_NETPOLL; | ||
453 | np->dev = bond->dev; | ||
454 | } else | ||
455 | #endif | ||
456 | dev_queue_xmit(skb); | 411 | dev_queue_xmit(skb); |
457 | 412 | ||
458 | return 0; | 413 | return 0; |
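The bond_queue_mapping() macro added above reuses the first two bytes of skb->cb as a u16, and bond_dev_queue_xmit() reads the queue id back out of it before handing the skb to the slave (the matching store happens elsewhere in the driver). A user-space model of parking a u16 in a per-packet scratch buffer the same way; the names are invented:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct pkt {
	uint16_t queue_mapping;
	char     cb[48];        /* per-packet scratch area, like skb->cb */
};

/* same trick as the macro: treat the start of cb as a u16 */
#define pkt_saved_queue(p) (*(uint16_t *)((p)->cb))

int main(void)
{
	struct pkt p;

	memset(&p, 0, sizeof(p));
	p.queue_mapping = 3;
	pkt_saved_queue(&p) = p.queue_mapping;  /* stash the chosen queue */

	p.queue_mapping = 0;                    /* clobbered along the way */
	p.queue_mapping = pkt_saved_queue(&p);  /* restore before slave xmit */

	printf("restored queue %u\n", p.queue_mapping);
	return 0;
}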
@@ -488,9 +443,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev, | |||
488 | struct slave *slave; | 443 | struct slave *slave; |
489 | int i; | 444 | int i; |
490 | 445 | ||
491 | write_lock(&bond->lock); | 446 | write_lock_bh(&bond->lock); |
492 | bond->vlgrp = grp; | 447 | bond->vlgrp = grp; |
493 | write_unlock(&bond->lock); | 448 | write_unlock_bh(&bond->lock); |
494 | 449 | ||
495 | bond_for_each_slave(bond, slave, i) { | 450 | bond_for_each_slave(bond, slave, i) { |
496 | struct net_device *slave_dev = slave->dev; | 451 | struct net_device *slave_dev = slave->dev; |
@@ -663,7 +618,8 @@ down: | |||
663 | static int bond_update_speed_duplex(struct slave *slave) | 618 | static int bond_update_speed_duplex(struct slave *slave) |
664 | { | 619 | { |
665 | struct net_device *slave_dev = slave->dev; | 620 | struct net_device *slave_dev = slave->dev; |
666 | struct ethtool_cmd etool; | 621 | struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET }; |
622 | u32 slave_speed; | ||
667 | int res; | 623 | int res; |
668 | 624 | ||
669 | /* Fake speed and duplex */ | 625 | /* Fake speed and duplex */ |
@@ -677,7 +633,8 @@ static int bond_update_speed_duplex(struct slave *slave) | |||
677 | if (res < 0) | 633 | if (res < 0) |
678 | return -1; | 634 | return -1; |
679 | 635 | ||
680 | switch (etool.speed) { | 636 | slave_speed = ethtool_cmd_speed(&etool); |
637 | switch (slave_speed) { | ||
681 | case SPEED_10: | 638 | case SPEED_10: |
682 | case SPEED_100: | 639 | case SPEED_100: |
683 | case SPEED_1000: | 640 | case SPEED_1000: |
@@ -695,7 +652,7 @@ static int bond_update_speed_duplex(struct slave *slave) | |||
695 | return -1; | 652 | return -1; |
696 | } | 653 | } |
697 | 654 | ||
698 | slave->speed = etool.speed; | 655 | slave->speed = slave_speed; |
699 | slave->duplex = etool.duplex; | 656 | slave->duplex = etool.duplex; |
700 | 657 | ||
701 | return 0; | 658 | return 0; |
@@ -865,6 +822,17 @@ static void bond_mc_del(struct bonding *bond, void *addr) | |||
865 | } | 822 | } |
866 | 823 | ||
867 | 824 | ||
825 | static void __bond_resend_igmp_join_requests(struct net_device *dev) | ||
826 | { | ||
827 | struct in_device *in_dev; | ||
828 | |||
829 | rcu_read_lock(); | ||
830 | in_dev = __in_dev_get_rcu(dev); | ||
831 | if (in_dev) | ||
832 | ip_mc_rejoin_groups(in_dev); | ||
833 | rcu_read_unlock(); | ||
834 | } | ||
835 | |||
868 | /* | 836 | /* |
869 | * Retrieve the list of registered multicast addresses for the bonding | 837 | * Retrieve the list of registered multicast addresses for the bonding |
870 | * device and retransmit an IGMP JOIN request to the current active | 838 | * device and retransmit an IGMP JOIN request to the current active |
@@ -872,17 +840,35 @@ static void bond_mc_del(struct bonding *bond, void *addr) | |||
872 | */ | 840 | */ |
873 | static void bond_resend_igmp_join_requests(struct bonding *bond) | 841 | static void bond_resend_igmp_join_requests(struct bonding *bond) |
874 | { | 842 | { |
875 | struct in_device *in_dev; | 843 | struct net_device *vlan_dev; |
876 | struct ip_mc_list *im; | 844 | struct vlan_entry *vlan; |
877 | 845 | ||
878 | rcu_read_lock(); | 846 | read_lock(&bond->lock); |
879 | in_dev = __in_dev_get_rcu(bond->dev); | 847 | |
880 | if (in_dev) { | 848 | /* rejoin all groups on bond device */ |
881 | for (im = in_dev->mc_list; im; im = im->next) | 849 | __bond_resend_igmp_join_requests(bond->dev); |
882 | ip_mc_rejoin_group(im); | 850 | |
851 | /* rejoin all groups on vlan devices */ | ||
852 | if (bond->vlgrp) { | ||
853 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | ||
854 | vlan_dev = vlan_group_get_device(bond->vlgrp, | ||
855 | vlan->vlan_id); | ||
856 | if (vlan_dev) | ||
857 | __bond_resend_igmp_join_requests(vlan_dev); | ||
858 | } | ||
883 | } | 859 | } |
884 | 860 | ||
885 | rcu_read_unlock(); | 861 | if (--bond->igmp_retrans > 0) |
862 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | ||
863 | |||
864 | read_unlock(&bond->lock); | ||
865 | } | ||
866 | |||
867 | static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) | ||
868 | { | ||
869 | struct bonding *bond = container_of(work, struct bonding, | ||
870 | mcast_work.work); | ||
871 | bond_resend_igmp_join_requests(bond); | ||
886 | } | 872 | } |
887 | 873 | ||
888 | /* | 874 | /* |
@@ -944,7 +930,6 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, | |||
944 | 930 | ||
945 | netdev_for_each_mc_addr(ha, bond->dev) | 931 | netdev_for_each_mc_addr(ha, bond->dev) |
946 | dev_mc_add(new_active->dev, ha->addr); | 932 | dev_mc_add(new_active->dev, ha->addr); |
947 | bond_resend_igmp_join_requests(bond); | ||
948 | } | 933 | } |
949 | } | 934 | } |
950 | 935 | ||
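The reworked bond_resend_igmp_join_requests() above rejoins multicast groups on the bond and on each VLAN device, then requeues itself as delayed work while the retransmit budget (igmp_retrans, seeded from the resend_igmp parameter) is not yet used up; the hunk just above drops the old direct call from bond_mc_swap() in favour of this scheduled path. A minimal model of that self-rescheduling countdown, with a plain loop standing in for queue_delayed_work():

#include <stdio.h>

static int igmp_retrans;

static void resend_igmp_join_requests(void)
{
	printf("rejoin groups on bond and VLAN devices\n");

	/* re-arm the work item until the budget is exhausted */
	if (--igmp_retrans > 0)
		printf("  re-armed, fires again after a short delay\n");
}

int main(void)
{
	igmp_retrans = 3;       /* stands in for params.resend_igmp */

	/* the workqueue would invoke this once per (re-)arm */
	while (igmp_retrans > 0)
		resend_igmp_join_requests();
	return 0;
}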
@@ -1091,6 +1076,21 @@ static struct slave *bond_find_best_slave(struct bonding *bond) | |||
1091 | return bestslave; | 1076 | return bestslave; |
1092 | } | 1077 | } |
1093 | 1078 | ||
1079 | static bool bond_should_notify_peers(struct bonding *bond) | ||
1080 | { | ||
1081 | struct slave *slave = bond->curr_active_slave; | ||
1082 | |||
1083 | pr_debug("bond_should_notify_peers: bond %s slave %s\n", | ||
1084 | bond->dev->name, slave ? slave->dev->name : "NULL"); | ||
1085 | |||
1086 | if (!slave || !bond->send_peer_notif || | ||
1087 | test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) | ||
1088 | return false; | ||
1089 | |||
1090 | bond->send_peer_notif--; | ||
1091 | return true; | ||
1092 | } | ||
1093 | |||
1094 | /** | 1094 | /** |
1095 | * change_active_interface - change the active slave into the specified one | 1095 | * change_active_interface - change the active slave into the specified one |
1096 | * @bond: our bonding struct | 1096 | * @bond: our bonding struct |
@@ -1158,31 +1158,43 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
1158 | bond_set_slave_inactive_flags(old_active); | 1158 | bond_set_slave_inactive_flags(old_active); |
1159 | 1159 | ||
1160 | if (new_active) { | 1160 | if (new_active) { |
1161 | bool should_notify_peers = false; | ||
1162 | |||
1161 | bond_set_slave_active_flags(new_active); | 1163 | bond_set_slave_active_flags(new_active); |
1162 | 1164 | ||
1163 | if (bond->params.fail_over_mac) | 1165 | if (bond->params.fail_over_mac) |
1164 | bond_do_fail_over_mac(bond, new_active, | 1166 | bond_do_fail_over_mac(bond, new_active, |
1165 | old_active); | 1167 | old_active); |
1166 | 1168 | ||
1167 | bond->send_grat_arp = bond->params.num_grat_arp; | 1169 | if (netif_running(bond->dev)) { |
1168 | bond_send_gratuitous_arp(bond); | 1170 | bond->send_peer_notif = |
1169 | 1171 | bond->params.num_peer_notif; | |
1170 | bond->send_unsol_na = bond->params.num_unsol_na; | 1172 | should_notify_peers = |
1171 | bond_send_unsolicited_na(bond); | 1173 | bond_should_notify_peers(bond); |
1174 | } | ||
1172 | 1175 | ||
1173 | write_unlock_bh(&bond->curr_slave_lock); | 1176 | write_unlock_bh(&bond->curr_slave_lock); |
1174 | read_unlock(&bond->lock); | 1177 | read_unlock(&bond->lock); |
1175 | 1178 | ||
1176 | netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); | 1179 | netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); |
1180 | if (should_notify_peers) | ||
1181 | netdev_bonding_change(bond->dev, | ||
1182 | NETDEV_NOTIFY_PEERS); | ||
1177 | 1183 | ||
1178 | read_lock(&bond->lock); | 1184 | read_lock(&bond->lock); |
1179 | write_lock_bh(&bond->curr_slave_lock); | 1185 | write_lock_bh(&bond->curr_slave_lock); |
1180 | } | 1186 | } |
1181 | } | 1187 | } |
1182 | 1188 | ||
1183 | /* resend IGMP joins since all were sent on curr_active_slave */ | 1189 | /* resend IGMP joins since active slave has changed or |
1184 | if (bond->params.mode == BOND_MODE_ROUNDROBIN) { | 1190 | * all were sent on curr_active_slave. |
1185 | bond_resend_igmp_join_requests(bond); | 1191 | * resend only if bond is brought up with the affected |
1192 | * bonding modes and the retransmission is enabled */ | ||
1193 | if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && | ||
1194 | ((USES_PRIMARY(bond->params.mode) && new_active) || | ||
1195 | bond->params.mode == BOND_MODE_ROUNDROBIN)) { | ||
1196 | bond->igmp_retrans = bond->params.resend_igmp; | ||
1197 | queue_delayed_work(bond->wq, &bond->mcast_work, 0); | ||
1186 | } | 1198 | } |
1187 | } | 1199 | } |
1188 | 1200 | ||
@@ -1274,58 +1286,104 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave) | |||
1274 | } | 1286 | } |
1275 | 1287 | ||
1276 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1288 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1277 | /* | 1289 | static inline int slave_enable_netpoll(struct slave *slave) |
1278 | * You must hold read lock on bond->lock before calling this. | ||
1279 | */ | ||
1280 | static bool slaves_support_netpoll(struct net_device *bond_dev) | ||
1281 | { | 1290 | { |
1282 | struct bonding *bond = netdev_priv(bond_dev); | 1291 | struct netpoll *np; |
1283 | struct slave *slave; | 1292 | int err = 0; |
1284 | int i = 0; | ||
1285 | bool ret = true; | ||
1286 | 1293 | ||
1287 | bond_for_each_slave(bond, slave, i) { | 1294 | np = kzalloc(sizeof(*np), GFP_KERNEL); |
1288 | if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) || | 1295 | err = -ENOMEM; |
1289 | !slave->dev->netdev_ops->ndo_poll_controller) | 1296 | if (!np) |
1290 | ret = false; | 1297 | goto out; |
1298 | |||
1299 | np->dev = slave->dev; | ||
1300 | strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ); | ||
1301 | err = __netpoll_setup(np); | ||
1302 | if (err) { | ||
1303 | kfree(np); | ||
1304 | goto out; | ||
1291 | } | 1305 | } |
1292 | return i != 0 && ret; | 1306 | slave->np = np; |
1307 | out: | ||
1308 | return err; | ||
1309 | } | ||
1310 | static inline void slave_disable_netpoll(struct slave *slave) | ||
1311 | { | ||
1312 | struct netpoll *np = slave->np; | ||
1313 | |||
1314 | if (!np) | ||
1315 | return; | ||
1316 | |||
1317 | slave->np = NULL; | ||
1318 | synchronize_rcu_bh(); | ||
1319 | __netpoll_cleanup(np); | ||
1320 | kfree(np); | ||
1321 | } | ||
1322 | static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) | ||
1323 | { | ||
1324 | if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL) | ||
1325 | return false; | ||
1326 | if (!slave_dev->netdev_ops->ndo_poll_controller) | ||
1327 | return false; | ||
1328 | return true; | ||
1293 | } | 1329 | } |
1294 | 1330 | ||
1295 | static void bond_poll_controller(struct net_device *bond_dev) | 1331 | static void bond_poll_controller(struct net_device *bond_dev) |
1296 | { | 1332 | { |
1297 | struct net_device *dev = bond_dev->npinfo->netpoll->real_dev; | ||
1298 | if (dev != bond_dev) | ||
1299 | netpoll_poll_dev(dev); | ||
1300 | } | 1333 | } |
1301 | 1334 | ||
1335 | static void __bond_netpoll_cleanup(struct bonding *bond) | ||
1336 | { | ||
1337 | struct slave *slave; | ||
1338 | int i; | ||
1339 | |||
1340 | bond_for_each_slave(bond, slave, i) | ||
1341 | if (IS_UP(slave->dev)) | ||
1342 | slave_disable_netpoll(slave); | ||
1343 | } | ||
1302 | static void bond_netpoll_cleanup(struct net_device *bond_dev) | 1344 | static void bond_netpoll_cleanup(struct net_device *bond_dev) |
1303 | { | 1345 | { |
1304 | struct bonding *bond = netdev_priv(bond_dev); | 1346 | struct bonding *bond = netdev_priv(bond_dev); |
1347 | |||
1348 | read_lock(&bond->lock); | ||
1349 | __bond_netpoll_cleanup(bond); | ||
1350 | read_unlock(&bond->lock); | ||
1351 | } | ||
1352 | |||
1353 | static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) | ||
1354 | { | ||
1355 | struct bonding *bond = netdev_priv(dev); | ||
1305 | struct slave *slave; | 1356 | struct slave *slave; |
1306 | const struct net_device_ops *ops; | 1357 | int i, err = 0; |
1307 | int i; | ||
1308 | 1358 | ||
1309 | read_lock(&bond->lock); | 1359 | read_lock(&bond->lock); |
1310 | bond_dev->npinfo = NULL; | ||
1311 | bond_for_each_slave(bond, slave, i) { | 1360 | bond_for_each_slave(bond, slave, i) { |
1312 | if (slave->dev) { | 1361 | err = slave_enable_netpoll(slave); |
1313 | ops = slave->dev->netdev_ops; | 1362 | if (err) { |
1314 | if (ops->ndo_netpoll_cleanup) | 1363 | __bond_netpoll_cleanup(bond); |
1315 | ops->ndo_netpoll_cleanup(slave->dev); | 1364 | break; |
1316 | else | ||
1317 | slave->dev->npinfo = NULL; | ||
1318 | } | 1365 | } |
1319 | } | 1366 | } |
1320 | read_unlock(&bond->lock); | 1367 | read_unlock(&bond->lock); |
1368 | return err; | ||
1321 | } | 1369 | } |
1322 | 1370 | ||
1323 | #else | 1371 | static struct netpoll_info *bond_netpoll_info(struct bonding *bond) |
1372 | { | ||
1373 | return bond->dev->npinfo; | ||
1374 | } | ||
1324 | 1375 | ||
1376 | #else | ||
1377 | static inline int slave_enable_netpoll(struct slave *slave) | ||
1378 | { | ||
1379 | return 0; | ||
1380 | } | ||
1381 | static inline void slave_disable_netpoll(struct slave *slave) | ||
1382 | { | ||
1383 | } | ||
1325 | static void bond_netpoll_cleanup(struct net_device *bond_dev) | 1384 | static void bond_netpoll_cleanup(struct net_device *bond_dev) |
1326 | { | 1385 | { |
1327 | } | 1386 | } |
1328 | |||
1329 | #endif | 1387 | #endif |
1330 | 1388 | ||
1331 | /*---------------------------------- IOCTL ----------------------------------*/ | 1389 | /*---------------------------------- IOCTL ----------------------------------*/ |
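bond_netpoll_setup() above enables netpoll on each slave in turn and, if any slave fails, calls __bond_netpoll_cleanup() to tear the already-enabled ones back down before reporting the error. A compact model of that enable-all-or-roll-back loop (slave count and failure point are made up):

#include <stdio.h>

#define NSLAVES 3

static int enable_one(int i)
{
	if (i == 2)             /* pretend the last slave can't do netpoll */
		return -1;
	printf("slave %d: netpoll enabled\n", i);
	return 0;
}

static void disable_enabled(int count)
{
	int i;

	for (i = 0; i < count; i++)
		printf("slave %d: netpoll disabled\n", i);
}

int main(void)
{
	int i, err = 0;

	for (i = 0; i < NSLAVES; i++) {
		err = enable_one(i);
		if (err) {
			disable_enabled(i);     /* roll back partial setup */
			break;
		}
	}
	printf("setup %s\n", err ? "failed, rolled back" : "succeeded");
	return 0;
}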
@@ -1340,52 +1398,68 @@ static int bond_sethwaddr(struct net_device *bond_dev, | |||
1340 | return 0; | 1398 | return 0; |
1341 | } | 1399 | } |
1342 | 1400 | ||
1343 | #define BOND_VLAN_FEATURES \ | 1401 | static u32 bond_fix_features(struct net_device *dev, u32 features) |
1344 | (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \ | ||
1345 | NETIF_F_HW_VLAN_FILTER) | ||
1346 | |||
1347 | /* | ||
1348 | * Compute the common dev->feature set available to all slaves. Some | ||
1349 | * feature bits are managed elsewhere, so preserve those feature bits | ||
1350 | * on the master device. | ||
1351 | */ | ||
1352 | static int bond_compute_features(struct bonding *bond) | ||
1353 | { | 1402 | { |
1354 | struct slave *slave; | 1403 | struct slave *slave; |
1355 | struct net_device *bond_dev = bond->dev; | 1404 | struct bonding *bond = netdev_priv(dev); |
1356 | unsigned long features = bond_dev->features; | 1405 | u32 mask; |
1357 | unsigned long vlan_features = 0; | ||
1358 | unsigned short max_hard_header_len = max((u16)ETH_HLEN, | ||
1359 | bond_dev->hard_header_len); | ||
1360 | int i; | 1406 | int i; |
1361 | 1407 | ||
1362 | features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); | 1408 | read_lock(&bond->lock); |
1363 | features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; | ||
1364 | 1409 | ||
1365 | if (!bond->first_slave) | 1410 | if (!bond->first_slave) { |
1366 | goto done; | 1411 | /* Disable adding VLANs to empty bond. But why? --mq */ |
1412 | features |= NETIF_F_VLAN_CHALLENGED; | ||
1413 | goto out; | ||
1414 | } | ||
1367 | 1415 | ||
1416 | mask = features; | ||
1368 | features &= ~NETIF_F_ONE_FOR_ALL; | 1417 | features &= ~NETIF_F_ONE_FOR_ALL; |
1418 | features |= NETIF_F_ALL_FOR_ALL; | ||
1369 | 1419 | ||
1370 | vlan_features = bond->first_slave->dev->vlan_features; | ||
1371 | bond_for_each_slave(bond, slave, i) { | 1420 | bond_for_each_slave(bond, slave, i) { |
1372 | features = netdev_increment_features(features, | 1421 | features = netdev_increment_features(features, |
1373 | slave->dev->features, | 1422 | slave->dev->features, |
1374 | NETIF_F_ONE_FOR_ALL); | 1423 | mask); |
1424 | } | ||
1425 | |||
1426 | out: | ||
1427 | read_unlock(&bond->lock); | ||
1428 | return features; | ||
1429 | } | ||
1430 | |||
1431 | #define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ | ||
1432 | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ | ||
1433 | NETIF_F_HIGHDMA | NETIF_F_LRO) | ||
1434 | |||
1435 | static void bond_compute_features(struct bonding *bond) | ||
1436 | { | ||
1437 | struct slave *slave; | ||
1438 | struct net_device *bond_dev = bond->dev; | ||
1439 | u32 vlan_features = BOND_VLAN_FEATURES; | ||
1440 | unsigned short max_hard_header_len = ETH_HLEN; | ||
1441 | int i; | ||
1442 | |||
1443 | read_lock(&bond->lock); | ||
1444 | |||
1445 | if (!bond->first_slave) | ||
1446 | goto done; | ||
1447 | |||
1448 | bond_for_each_slave(bond, slave, i) { | ||
1375 | vlan_features = netdev_increment_features(vlan_features, | 1449 | vlan_features = netdev_increment_features(vlan_features, |
1376 | slave->dev->vlan_features, | 1450 | slave->dev->vlan_features, BOND_VLAN_FEATURES); |
1377 | NETIF_F_ONE_FOR_ALL); | 1451 | |
1378 | if (slave->dev->hard_header_len > max_hard_header_len) | 1452 | if (slave->dev->hard_header_len > max_hard_header_len) |
1379 | max_hard_header_len = slave->dev->hard_header_len; | 1453 | max_hard_header_len = slave->dev->hard_header_len; |
1380 | } | 1454 | } |
1381 | 1455 | ||
1382 | done: | 1456 | done: |
1383 | features |= (bond_dev->features & BOND_VLAN_FEATURES); | 1457 | bond_dev->vlan_features = vlan_features; |
1384 | bond_dev->features = netdev_fix_features(features, NULL); | ||
1385 | bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); | ||
1386 | bond_dev->hard_header_len = max_hard_header_len; | 1458 | bond_dev->hard_header_len = max_hard_header_len; |
1387 | 1459 | ||
1388 | return 0; | 1460 | read_unlock(&bond->lock); |
1461 | |||
1462 | netdev_change_features(bond_dev); | ||
1389 | } | 1463 | } |
1390 | 1464 | ||
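bond_fix_features() and bond_compute_features() above both fold every slave's capability bits into the bond's set through netdev_increment_features() under a mask. The user-space sketch below shows only the shape of that fold; collapsing every flag to "all slaves must agree" is a simplification of the real ONE_FOR_ALL/ALL_FOR_ALL rules, and all names are invented:

#include <stdint.h>
#include <stdio.h>

#define CAP_CSUM        0x1u
#define CAP_TSO         0x2u
#define CAP_HIGHDMA     0x4u

/* Stand-in for netdev_increment_features(): bits outside the mask are
 * left alone, bits inside it are ANDed with the member's capabilities. */
static uint32_t fold_features(uint32_t all, uint32_t one, uint32_t mask)
{
        return all & (one | ~mask);
}

int main(void)
{
        uint32_t slave_caps[] = {
                CAP_CSUM | CAP_TSO | CAP_HIGHDMA,
                CAP_CSUM | CAP_TSO,             /* no HIGHDMA on this one */
        };
        uint32_t mask = CAP_CSUM | CAP_TSO | CAP_HIGHDMA;
        uint32_t bond_caps = mask;              /* start from "everything" */
        unsigned int i;

        for (i = 0; i < 2; i++)
                bond_caps = fold_features(bond_caps, slave_caps[i], mask);

        printf("bond caps: 0x%x\n", bond_caps); /* CSUM|TSO; HIGHDMA dropped */
        return 0;
}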
1391 | static void bond_setup_by_slave(struct net_device *bond_dev, | 1465 | static void bond_setup_by_slave(struct net_device *bond_dev, |
@@ -1404,6 +1478,71 @@ static void bond_setup_by_slave(struct net_device *bond_dev, | |||
1404 | bond->setup_by_slave = 1; | 1478 | bond->setup_by_slave = 1; |
1405 | } | 1479 | } |
1406 | 1480 | ||
1481 | /* On bonding slaves other than the currently active slave, suppress | ||
1482 | * duplicates except for alb non-mcast/bcast. | ||
1483 | */ | ||
1484 | static bool bond_should_deliver_exact_match(struct sk_buff *skb, | ||
1485 | struct slave *slave, | ||
1486 | struct bonding *bond) | ||
1487 | { | ||
1488 | if (bond_is_slave_inactive(slave)) { | ||
1489 | if (bond->params.mode == BOND_MODE_ALB && | ||
1490 | skb->pkt_type != PACKET_BROADCAST && | ||
1491 | skb->pkt_type != PACKET_MULTICAST) | ||
1492 | return false; | ||
1493 | return true; | ||
1494 | } | ||
1495 | return false; | ||
1496 | } | ||
1497 | |||
1498 | static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | ||
1499 | { | ||
1500 | struct sk_buff *skb = *pskb; | ||
1501 | struct slave *slave; | ||
1502 | struct bonding *bond; | ||
1503 | |||
1504 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
1505 | if (unlikely(!skb)) | ||
1506 | return RX_HANDLER_CONSUMED; | ||
1507 | |||
1508 | *pskb = skb; | ||
1509 | |||
1510 | slave = bond_slave_get_rcu(skb->dev); | ||
1511 | bond = slave->bond; | ||
1512 | |||
1513 | if (bond->params.arp_interval) | ||
1514 | slave->dev->last_rx = jiffies; | ||
1515 | |||
1516 | if (bond->recv_probe) { | ||
1517 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | ||
1518 | |||
1519 | if (likely(nskb)) { | ||
1520 | bond->recv_probe(nskb, bond, slave); | ||
1521 | dev_kfree_skb(nskb); | ||
1522 | } | ||
1523 | } | ||
1524 | |||
1525 | if (bond_should_deliver_exact_match(skb, slave, bond)) { | ||
1526 | return RX_HANDLER_EXACT; | ||
1527 | } | ||
1528 | |||
1529 | skb->dev = bond->dev; | ||
1530 | |||
1531 | if (bond->params.mode == BOND_MODE_ALB && | ||
1532 | bond->dev->priv_flags & IFF_BRIDGE_PORT && | ||
1533 | skb->pkt_type == PACKET_HOST) { | ||
1534 | |||
1535 | if (unlikely(skb_cow_head(skb, | ||
1536 | skb->data - skb_mac_header(skb)))) { | ||
1537 | kfree_skb(skb); | ||
1538 | return RX_HANDLER_CONSUMED; | ||
1539 | } | ||
1540 | memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); | ||
1541 | } | ||
1542 | |||
1543 | return RX_HANDLER_ANOTHER; | ||
1544 | } | ||
1545 | |||
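bond_handle_frame() above talks to the receive core purely through its return value: RX_HANDLER_CONSUMED once the skb has been dealt with, RX_HANDLER_EXACT to limit delivery on an inactive slave, and RX_HANDLER_ANOTHER after retargeting skb->dev at the bond so the stack re-dispatches the frame. A toy user-space model of that decision, with invented types and no real dispatch loop behind it:

#include <stdbool.h>
#include <stdio.h>

enum verdict { PASS, ANOTHER, EXACT, CONSUMED };  /* names mirror the kernel's */

struct pkt {
        const char *dev;                /* device the packet is attributed to */
        bool on_inactive_slave;
};

static enum verdict bond_like_handler(struct pkt *p)
{
        if (p->on_inactive_slave)
                return EXACT;           /* only exact-match listeners see it */
        p->dev = "bond0";               /* retarget to the master ...         */
        return ANOTHER;                 /* ... and let the stack re-dispatch  */
}

int main(void)
{
        struct pkt p = { .dev = "eth0", .on_inactive_slave = false };

        if (bond_like_handler(&p) == ANOTHER)
                printf("re-dispatched on %s\n", p.dev);
        return 0;
}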
1407 | /* enslave device <slave> to bond device <master> */ | 1546 | /* enslave device <slave> to bond device <master> */ |
1408 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | 1547 | int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) |
1409 | { | 1548 | { |
@@ -1413,7 +1552,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1413 | struct netdev_hw_addr *ha; | 1552 | struct netdev_hw_addr *ha; |
1414 | struct sockaddr addr; | 1553 | struct sockaddr addr; |
1415 | int link_reporting; | 1554 | int link_reporting; |
1416 | int old_features = bond_dev->features; | ||
1417 | int res = 0; | 1555 | int res = 0; |
1418 | 1556 | ||
1419 | if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && | 1557 | if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && |
@@ -1422,12 +1560,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1422 | bond_dev->name, slave_dev->name); | 1560 | bond_dev->name, slave_dev->name); |
1423 | } | 1561 | } |
1424 | 1562 | ||
1425 | /* bond must be initialized by bond_open() before enslaving */ | ||
1426 | if (!(bond_dev->flags & IFF_UP)) { | ||
1427 | pr_warning("%s: master_dev is not up in bond_enslave\n", | ||
1428 | bond_dev->name); | ||
1429 | } | ||
1430 | |||
1431 | /* already enslaved */ | 1563 | /* already enslaved */ |
1432 | if (slave_dev->flags & IFF_SLAVE) { | 1564 | if (slave_dev->flags & IFF_SLAVE) { |
1433 | pr_debug("Error, Device was already enslaved\n"); | 1565 | pr_debug("Error, Device was already enslaved\n"); |
@@ -1446,16 +1578,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1446 | pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", | 1578 | pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", |
1447 | bond_dev->name, slave_dev->name, | 1579 | bond_dev->name, slave_dev->name, |
1448 | slave_dev->name, bond_dev->name); | 1580 | slave_dev->name, bond_dev->name); |
1449 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | ||
1450 | } | 1581 | } |
1451 | } else { | 1582 | } else { |
1452 | pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); | 1583 | pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); |
1453 | if (bond->slave_cnt == 0) { | ||
1454 | /* First slave, and it is not VLAN challenged, | ||
1455 | * so remove the block of adding VLANs over the bond. | ||
1456 | */ | ||
1457 | bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; | ||
1458 | } | ||
1459 | } | 1584 | } |
1460 | 1585 | ||
1461 | /* | 1586 | /* |
@@ -1527,9 +1652,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1527 | } | 1652 | } |
1528 | } | 1653 | } |
1529 | 1654 | ||
1655 | call_netdevice_notifiers(NETDEV_JOIN, slave_dev); | ||
1656 | |||
1530 | /* If this is the first slave, then we need to set the master's hardware | 1657 | /* If this is the first slave, then we need to set the master's hardware |
1531 | * address to be the same as the slave's. */ | 1658 | * address to be the same as the slave's. */ |
1532 | if (bond->slave_cnt == 0) | 1659 | if (is_zero_ether_addr(bond->dev->dev_addr)) |
1533 | memcpy(bond->dev->dev_addr, slave_dev->dev_addr, | 1660 | memcpy(bond->dev->dev_addr, slave_dev->dev_addr, |
1534 | slave_dev->addr_len); | 1661 | slave_dev->addr_len); |
1535 | 1662 | ||
@@ -1575,11 +1702,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1575 | } | 1702 | } |
1576 | } | 1703 | } |
1577 | 1704 | ||
1578 | res = netdev_set_master(slave_dev, bond_dev); | 1705 | res = netdev_set_bond_master(slave_dev, bond_dev); |
1579 | if (res) { | 1706 | if (res) { |
1580 | pr_debug("Error %d calling netdev_set_master\n", res); | 1707 | pr_debug("Error %d calling netdev_set_bond_master\n", res); |
1581 | goto err_restore_mac; | 1708 | goto err_restore_mac; |
1582 | } | 1709 | } |
1710 | |||
1583 | /* open the slave since the application closed it */ | 1711 | /* open the slave since the application closed it */ |
1584 | res = dev_open(slave_dev); | 1712 | res = dev_open(slave_dev); |
1585 | if (res) { | 1713 | if (res) { |
@@ -1587,6 +1715,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1587 | goto err_unset_master; | 1715 | goto err_unset_master; |
1588 | } | 1716 | } |
1589 | 1717 | ||
1718 | new_slave->bond = bond; | ||
1590 | new_slave->dev = slave_dev; | 1719 | new_slave->dev = slave_dev; |
1591 | slave_dev->priv_flags |= IFF_BONDING; | 1720 | slave_dev->priv_flags |= IFF_BONDING; |
1592 | 1721 | ||
@@ -1642,10 +1771,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1642 | new_slave->delay = 0; | 1771 | new_slave->delay = 0; |
1643 | new_slave->link_failure_count = 0; | 1772 | new_slave->link_failure_count = 0; |
1644 | 1773 | ||
1645 | bond_compute_features(bond); | ||
1646 | |||
1647 | write_unlock_bh(&bond->lock); | 1774 | write_unlock_bh(&bond->lock); |
1648 | 1775 | ||
1776 | bond_compute_features(bond); | ||
1777 | |||
1649 | read_lock(&bond->lock); | 1778 | read_lock(&bond->lock); |
1650 | 1779 | ||
1651 | new_slave->last_arp_rx = jiffies; | 1780 | new_slave->last_arp_rx = jiffies; |
@@ -1738,7 +1867,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1738 | break; | 1867 | break; |
1739 | case BOND_MODE_TLB: | 1868 | case BOND_MODE_TLB: |
1740 | case BOND_MODE_ALB: | 1869 | case BOND_MODE_ALB: |
1741 | new_slave->state = BOND_STATE_ACTIVE; | 1870 | bond_set_active_slave(new_slave); |
1742 | bond_set_slave_inactive_flags(new_slave); | 1871 | bond_set_slave_inactive_flags(new_slave); |
1743 | bond_select_active_slave(bond); | 1872 | bond_select_active_slave(bond); |
1744 | break; | 1873 | break; |
@@ -1746,7 +1875,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1746 | pr_debug("This slave is always active in trunk mode\n"); | 1875 | pr_debug("This slave is always active in trunk mode\n"); |
1747 | 1876 | ||
1748 | /* always active in trunk mode */ | 1877 | /* always active in trunk mode */ |
1749 | new_slave->state = BOND_STATE_ACTIVE; | 1878 | bond_set_active_slave(new_slave); |
1750 | 1879 | ||
1751 | /* In trunking mode there is little meaning to curr_active_slave | 1880 | /* In trunking mode there is little meaning to curr_active_slave |
1752 | * anyway (it holds no special properties of the bond device), | 1881 | * anyway (it holds no special properties of the bond device), |
@@ -1763,45 +1892,49 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1763 | bond_set_carrier(bond); | 1892 | bond_set_carrier(bond); |
1764 | 1893 | ||
1765 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1894 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1766 | /* | 1895 | slave_dev->npinfo = bond_netpoll_info(bond); |
1767 | * Netpoll and bonding is broken, make sure it is not initialized | 1896 | if (slave_dev->npinfo) { |
1768 | * until it is fixed. | 1897 | if (slave_enable_netpoll(new_slave)) { |
1769 | */ | 1898 | read_unlock(&bond->lock); |
1770 | if (disable_netpoll) { | 1899 | pr_info("Error, %s: master_dev is using netpoll, " |
1771 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | 1900 | "but new slave device does not support netpoll.\n", |
1772 | } else { | 1901 | bond_dev->name); |
1773 | if (slaves_support_netpoll(bond_dev)) { | 1902 | res = -EBUSY; |
1774 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | 1903 | goto err_close; |
1775 | if (bond_dev->npinfo) | ||
1776 | slave_dev->npinfo = bond_dev->npinfo; | ||
1777 | } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { | ||
1778 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
1779 | pr_info("New slave device %s does not support netpoll\n", | ||
1780 | slave_dev->name); | ||
1781 | pr_info("Disabling netpoll support for %s\n", bond_dev->name); | ||
1782 | } | 1904 | } |
1783 | } | 1905 | } |
1784 | #endif | 1906 | #endif |
1907 | |||
1785 | read_unlock(&bond->lock); | 1908 | read_unlock(&bond->lock); |
1786 | 1909 | ||
1787 | res = bond_create_slave_symlinks(bond_dev, slave_dev); | 1910 | res = bond_create_slave_symlinks(bond_dev, slave_dev); |
1788 | if (res) | 1911 | if (res) |
1789 | goto err_close; | 1912 | goto err_close; |
1790 | 1913 | ||
1914 | res = netdev_rx_handler_register(slave_dev, bond_handle_frame, | ||
1915 | new_slave); | ||
1916 | if (res) { | ||
1917 | pr_debug("Error %d calling netdev_rx_handler_register\n", res); | ||
1918 | goto err_dest_symlinks; | ||
1919 | } | ||
1920 | |||
1791 | pr_info("%s: enslaving %s as a%s interface with a%s link.\n", | 1921 | pr_info("%s: enslaving %s as a%s interface with a%s link.\n", |
1792 | bond_dev->name, slave_dev->name, | 1922 | bond_dev->name, slave_dev->name, |
1793 | new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup", | 1923 | bond_is_active_slave(new_slave) ? "n active" : " backup", |
1794 | new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); | 1924 | new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); |
1795 | 1925 | ||
1796 | /* enslave is successful */ | 1926 | /* enslave is successful */ |
1797 | return 0; | 1927 | return 0; |
1798 | 1928 | ||
1799 | /* Undo stages on error */ | 1929 | /* Undo stages on error */ |
1930 | err_dest_symlinks: | ||
1931 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | ||
1932 | |||
1800 | err_close: | 1933 | err_close: |
1801 | dev_close(slave_dev); | 1934 | dev_close(slave_dev); |
1802 | 1935 | ||
1803 | err_unset_master: | 1936 | err_unset_master: |
1804 | netdev_set_master(slave_dev, NULL); | 1937 | netdev_set_bond_master(slave_dev, NULL); |
1805 | 1938 | ||
1806 | err_restore_mac: | 1939 | err_restore_mac: |
1807 | if (!bond->params.fail_over_mac) { | 1940 | if (!bond->params.fail_over_mac) { |
@@ -1821,7 +1954,7 @@ err_free: | |||
1821 | kfree(new_slave); | 1954 | kfree(new_slave); |
1822 | 1955 | ||
1823 | err_undo_flags: | 1956 | err_undo_flags: |
1824 | bond_dev->features = old_features; | 1957 | bond_compute_features(bond); |
1825 | 1958 | ||
1826 | return res; | 1959 | return res; |
1827 | } | 1960 | } |
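The err_dest_symlinks/err_close/err_unset_master/err_restore_mac/err_undo_flags labels in bond_enslave() above undo setup steps in reverse order of acquisition, the usual goto-unwind idiom for multi-stage initialization. A self-contained user-space miniature of the same shape, with invented resources:

#include <stdio.h>
#include <stdlib.h>

static int setup_pair(void)
{
        char *a, *b;

        a = malloc(16);
        if (!a)
                goto err_a;

        b = malloc(16);
        if (!b)
                goto err_b;

        printf("both resources acquired\n");
        free(b);
        free(a);
        return 0;

err_b:                                  /* undo only what already succeeded */
        free(a);
err_a:
        return -1;
}

int main(void)
{
        return setup_pair() ? 1 : 0;
}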
@@ -1842,6 +1975,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1842 | struct bonding *bond = netdev_priv(bond_dev); | 1975 | struct bonding *bond = netdev_priv(bond_dev); |
1843 | struct slave *slave, *oldcurrent; | 1976 | struct slave *slave, *oldcurrent; |
1844 | struct sockaddr addr; | 1977 | struct sockaddr addr; |
1978 | u32 old_features = bond_dev->features; | ||
1845 | 1979 | ||
1846 | /* slave is not a slave or master is not master of this slave */ | 1980 | /* slave is not a slave or master is not master of this slave */ |
1847 | if (!(slave_dev->flags & IFF_SLAVE) || | 1981 | if (!(slave_dev->flags & IFF_SLAVE) || |
@@ -1851,7 +1985,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1851 | return -EINVAL; | 1985 | return -EINVAL; |
1852 | } | 1986 | } |
1853 | 1987 | ||
1854 | netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE); | 1988 | block_netpoll_tx(); |
1989 | netdev_bonding_change(bond_dev, NETDEV_RELEASE); | ||
1855 | write_lock_bh(&bond->lock); | 1990 | write_lock_bh(&bond->lock); |
1856 | 1991 | ||
1857 | slave = bond_get_slave_by_dev(bond, slave_dev); | 1992 | slave = bond_get_slave_by_dev(bond, slave_dev); |
@@ -1860,9 +1995,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1860 | pr_info("%s: %s not enslaved\n", | 1995 | pr_info("%s: %s not enslaved\n", |
1861 | bond_dev->name, slave_dev->name); | 1996 | bond_dev->name, slave_dev->name); |
1862 | write_unlock_bh(&bond->lock); | 1997 | write_unlock_bh(&bond->lock); |
1998 | unblock_netpoll_tx(); | ||
1863 | return -EINVAL; | 1999 | return -EINVAL; |
1864 | } | 2000 | } |
1865 | 2001 | ||
2002 | /* unregister rx_handler early so bond_handle_frame wouldn't be called | ||
2003 | * for this slave anymore. | ||
2004 | */ | ||
2005 | netdev_rx_handler_unregister(slave_dev); | ||
2006 | write_unlock_bh(&bond->lock); | ||
2007 | synchronize_net(); | ||
2008 | write_lock_bh(&bond->lock); | ||
2009 | |||
1866 | if (!bond->params.fail_over_mac) { | 2010 | if (!bond->params.fail_over_mac) { |
1867 | if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && | 2011 | if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && |
1868 | bond->slave_cnt > 1) | 2012 | bond->slave_cnt > 1) |
@@ -1882,7 +2026,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1882 | 2026 | ||
1883 | pr_info("%s: releasing %s interface %s\n", | 2027 | pr_info("%s: releasing %s interface %s\n", |
1884 | bond_dev->name, | 2028 | bond_dev->name, |
1885 | (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup", | 2029 | bond_is_active_slave(slave) ? "active" : "backup", |
1886 | slave_dev->name); | 2030 | slave_dev->name); |
1887 | 2031 | ||
1888 | oldcurrent = bond->curr_active_slave; | 2032 | oldcurrent = bond->curr_active_slave; |
@@ -1892,8 +2036,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1892 | /* release the slave from its bond */ | 2036 | /* release the slave from its bond */ |
1893 | bond_detach_slave(bond, slave); | 2037 | bond_detach_slave(bond, slave); |
1894 | 2038 | ||
1895 | bond_compute_features(bond); | ||
1896 | |||
1897 | if (bond->primary_slave == slave) | 2039 | if (bond->primary_slave == slave) |
1898 | bond->primary_slave = NULL; | 2040 | bond->primary_slave = NULL; |
1899 | 2041 | ||
@@ -1937,22 +2079,22 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1937 | */ | 2079 | */ |
1938 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); | 2080 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); |
1939 | 2081 | ||
1940 | if (!bond->vlgrp) { | 2082 | if (bond->vlgrp) { |
1941 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | ||
1942 | } else { | ||
1943 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", | 2083 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", |
1944 | bond_dev->name, bond_dev->name); | 2084 | bond_dev->name, bond_dev->name); |
1945 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", | 2085 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", |
1946 | bond_dev->name); | 2086 | bond_dev->name); |
1947 | } | 2087 | } |
1948 | } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) && | ||
1949 | !bond_has_challenged_slaves(bond)) { | ||
1950 | pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n", | ||
1951 | bond_dev->name, slave_dev->name, bond_dev->name); | ||
1952 | bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; | ||
1953 | } | 2088 | } |
1954 | 2089 | ||
1955 | write_unlock_bh(&bond->lock); | 2090 | write_unlock_bh(&bond->lock); |
2091 | unblock_netpoll_tx(); | ||
2092 | |||
2093 | bond_compute_features(bond); | ||
2094 | if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && | ||
2095 | (old_features & NETIF_F_VLAN_CHALLENGED)) | ||
2096 | pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n", | ||
2097 | bond_dev->name, slave_dev->name, bond_dev->name); | ||
1956 | 2098 | ||
1957 | /* must do this from outside any spinlocks */ | 2099 | /* must do this from outside any spinlocks */ |
1958 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | 2100 | bond_destroy_slave_symlinks(bond_dev, slave_dev); |
@@ -1978,21 +2120,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1978 | netif_addr_unlock_bh(bond_dev); | 2120 | netif_addr_unlock_bh(bond_dev); |
1979 | } | 2121 | } |
1980 | 2122 | ||
1981 | netdev_set_master(slave_dev, NULL); | 2123 | netdev_set_bond_master(slave_dev, NULL); |
1982 | 2124 | ||
1983 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2125 | slave_disable_netpoll(slave); |
1984 | read_lock_bh(&bond->lock); | ||
1985 | |||
1986 | /* Make sure netpoll over stays disabled until fixed. */ | ||
1987 | if (!disable_netpoll) | ||
1988 | if (slaves_support_netpoll(bond_dev)) | ||
1989 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
1990 | read_unlock_bh(&bond->lock); | ||
1991 | if (slave_dev->netdev_ops->ndo_netpoll_cleanup) | ||
1992 | slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); | ||
1993 | else | ||
1994 | slave_dev->npinfo = NULL; | ||
1995 | #endif | ||
1996 | 2126 | ||
1997 | /* close slave before restoring its mac address */ | 2127 | /* close slave before restoring its mac address */ |
1998 | dev_close(slave_dev); | 2128 | dev_close(slave_dev); |
@@ -2006,9 +2136,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
2006 | 2136 | ||
2007 | dev_set_mtu(slave_dev, slave->original_mtu); | 2137 | dev_set_mtu(slave_dev, slave->original_mtu); |
2008 | 2138 | ||
2009 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | | 2139 | slave_dev->priv_flags &= ~IFF_BONDING; |
2010 | IFF_SLAVE_INACTIVE | IFF_BONDING | | ||
2011 | IFF_SLAVE_NEEDARP); | ||
2012 | 2140 | ||
2013 | kfree(slave); | 2141 | kfree(slave); |
2014 | 2142 | ||
@@ -2016,17 +2144,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
2016 | } | 2144 | } |
2017 | 2145 | ||
2018 | /* | 2146 | /* |
2019 | * First release a slave and than destroy the bond if no more slaves are left. | 2147 | * First release a slave and then destroy the bond if no more slaves are left. |
2020 | * Must be under rtnl_lock when this function is called. | 2148 | * Must be under rtnl_lock when this function is called. |
2021 | */ | 2149 | */ |
2022 | int bond_release_and_destroy(struct net_device *bond_dev, | 2150 | static int bond_release_and_destroy(struct net_device *bond_dev, |
2023 | struct net_device *slave_dev) | 2151 | struct net_device *slave_dev) |
2024 | { | 2152 | { |
2025 | struct bonding *bond = netdev_priv(bond_dev); | 2153 | struct bonding *bond = netdev_priv(bond_dev); |
2026 | int ret; | 2154 | int ret; |
2027 | 2155 | ||
2028 | ret = bond_release(bond_dev, slave_dev); | 2156 | ret = bond_release(bond_dev, slave_dev); |
2029 | if ((ret == 0) && (bond->slave_cnt == 0)) { | 2157 | if ((ret == 0) && (bond->slave_cnt == 0)) { |
2158 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
2030 | pr_info("%s: destroying bond %s.\n", | 2159 | pr_info("%s: destroying bond %s.\n", |
2031 | bond_dev->name, bond_dev->name); | 2160 | bond_dev->name, bond_dev->name); |
2032 | unregister_netdevice(bond_dev); | 2161 | unregister_netdevice(bond_dev); |
@@ -2071,6 +2200,12 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2071 | */ | 2200 | */ |
2072 | write_unlock_bh(&bond->lock); | 2201 | write_unlock_bh(&bond->lock); |
2073 | 2202 | ||
2203 | /* unregister rx_handler early so bond_handle_frame wouldn't | ||
2204 | * be called for this slave anymore. | ||
2205 | */ | ||
2206 | netdev_rx_handler_unregister(slave_dev); | ||
2207 | synchronize_net(); | ||
2208 | |||
2074 | if (bond_is_lb(bond)) { | 2209 | if (bond_is_lb(bond)) { |
2075 | /* must be called only after the slave | 2210 | /* must be called only after the slave |
2076 | * has been detached from the list | 2211 | * has been detached from the list |
@@ -2078,8 +2213,6 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2078 | bond_alb_deinit_slave(bond, slave); | 2213 | bond_alb_deinit_slave(bond, slave); |
2079 | } | 2214 | } |
2080 | 2215 | ||
2081 | bond_compute_features(bond); | ||
2082 | |||
2083 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | 2216 | bond_destroy_slave_symlinks(bond_dev, slave_dev); |
2084 | bond_del_vlans_from_slave(bond, slave_dev); | 2217 | bond_del_vlans_from_slave(bond, slave_dev); |
2085 | 2218 | ||
@@ -2102,7 +2235,9 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2102 | netif_addr_unlock_bh(bond_dev); | 2235 | netif_addr_unlock_bh(bond_dev); |
2103 | } | 2236 | } |
2104 | 2237 | ||
2105 | netdev_set_master(slave_dev, NULL); | 2238 | netdev_set_bond_master(slave_dev, NULL); |
2239 | |||
2240 | slave_disable_netpoll(slave); | ||
2106 | 2241 | ||
2107 | /* close slave before restoring its mac address */ | 2242 | /* close slave before restoring its mac address */ |
2108 | dev_close(slave_dev); | 2243 | dev_close(slave_dev); |
@@ -2114,9 +2249,6 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2114 | dev_set_mac_address(slave_dev, &addr); | 2249 | dev_set_mac_address(slave_dev, &addr); |
2115 | } | 2250 | } |
2116 | 2251 | ||
2117 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | | ||
2118 | IFF_SLAVE_INACTIVE); | ||
2119 | |||
2120 | kfree(slave); | 2252 | kfree(slave); |
2121 | 2253 | ||
2122 | /* re-acquire the lock before getting the next slave */ | 2254 | /* re-acquire the lock before getting the next slave */ |
@@ -2129,9 +2261,7 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2129 | */ | 2261 | */ |
2130 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); | 2262 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); |
2131 | 2263 | ||
2132 | if (!bond->vlgrp) { | 2264 | if (bond->vlgrp) { |
2133 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | ||
2134 | } else { | ||
2135 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", | 2265 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", |
2136 | bond_dev->name, bond_dev->name); | 2266 | bond_dev->name, bond_dev->name); |
2137 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", | 2267 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", |
@@ -2143,6 +2273,8 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2143 | out: | 2273 | out: |
2144 | write_unlock_bh(&bond->lock); | 2274 | write_unlock_bh(&bond->lock); |
2145 | 2275 | ||
2276 | bond_compute_features(bond); | ||
2277 | |||
2146 | return 0; | 2278 | return 0; |
2147 | } | 2279 | } |
2148 | 2280 | ||
@@ -2191,9 +2323,11 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi | |||
2191 | (old_active) && | 2323 | (old_active) && |
2192 | (new_active->link == BOND_LINK_UP) && | 2324 | (new_active->link == BOND_LINK_UP) && |
2193 | IS_UP(new_active->dev)) { | 2325 | IS_UP(new_active->dev)) { |
2326 | block_netpoll_tx(); | ||
2194 | write_lock_bh(&bond->curr_slave_lock); | 2327 | write_lock_bh(&bond->curr_slave_lock); |
2195 | bond_change_active_slave(bond, new_active); | 2328 | bond_change_active_slave(bond, new_active); |
2196 | write_unlock_bh(&bond->curr_slave_lock); | 2329 | write_unlock_bh(&bond->curr_slave_lock); |
2330 | unblock_netpoll_tx(); | ||
2197 | } else | 2331 | } else |
2198 | res = -EINVAL; | 2332 | res = -EINVAL; |
2199 | 2333 | ||
@@ -2229,7 +2363,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in | |||
2229 | res = 0; | 2363 | res = 0; |
2230 | strcpy(info->slave_name, slave->dev->name); | 2364 | strcpy(info->slave_name, slave->dev->name); |
2231 | info->link = slave->link; | 2365 | info->link = slave->link; |
2232 | info->state = slave->state; | 2366 | info->state = bond_slave_state(slave); |
2233 | info->link_failure_count = slave->link_failure_count; | 2367 | info->link_failure_count = slave->link_failure_count; |
2234 | break; | 2368 | break; |
2235 | } | 2369 | } |
@@ -2268,7 +2402,7 @@ static int bond_miimon_inspect(struct bonding *bond) | |||
2268 | bond->dev->name, | 2402 | bond->dev->name, |
2269 | (bond->params.mode == | 2403 | (bond->params.mode == |
2270 | BOND_MODE_ACTIVEBACKUP) ? | 2404 | BOND_MODE_ACTIVEBACKUP) ? |
2271 | ((slave->state == BOND_STATE_ACTIVE) ? | 2405 | (bond_is_active_slave(slave) ? |
2272 | "active " : "backup ") : "", | 2406 | "active " : "backup ") : "", |
2273 | slave->dev->name, | 2407 | slave->dev->name, |
2274 | bond->params.downdelay * bond->params.miimon); | 2408 | bond->params.downdelay * bond->params.miimon); |
@@ -2359,17 +2493,20 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2359 | 2493 | ||
2360 | if (bond->params.mode == BOND_MODE_8023AD) { | 2494 | if (bond->params.mode == BOND_MODE_8023AD) { |
2361 | /* prevent it from being the active one */ | 2495 | /* prevent it from being the active one */ |
2362 | slave->state = BOND_STATE_BACKUP; | 2496 | bond_set_backup_slave(slave); |
2363 | } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { | 2497 | } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { |
2364 | /* make it immediately active */ | 2498 | /* make it immediately active */ |
2365 | slave->state = BOND_STATE_ACTIVE; | 2499 | bond_set_active_slave(slave); |
2366 | } else if (slave != bond->primary_slave) { | 2500 | } else if (slave != bond->primary_slave) { |
2367 | /* prevent it from being the active one */ | 2501 | /* prevent it from being the active one */ |
2368 | slave->state = BOND_STATE_BACKUP; | 2502 | bond_set_backup_slave(slave); |
2369 | } | 2503 | } |
2370 | 2504 | ||
2371 | pr_info("%s: link status definitely up for interface %s.\n", | 2505 | bond_update_speed_duplex(slave); |
2372 | bond->dev->name, slave->dev->name); | 2506 | |
2507 | pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", | ||
2508 | bond->dev->name, slave->dev->name, | ||
2509 | slave->speed, slave->duplex ? "full" : "half"); | ||
2373 | 2510 | ||
2374 | /* notify ad that the link status has changed */ | 2511 | /* notify ad that the link status has changed */ |
2375 | if (bond->params.mode == BOND_MODE_8023AD) | 2512 | if (bond->params.mode == BOND_MODE_8023AD) |
@@ -2422,9 +2559,11 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2422 | 2559 | ||
2423 | do_failover: | 2560 | do_failover: |
2424 | ASSERT_RTNL(); | 2561 | ASSERT_RTNL(); |
2562 | block_netpoll_tx(); | ||
2425 | write_lock_bh(&bond->curr_slave_lock); | 2563 | write_lock_bh(&bond->curr_slave_lock); |
2426 | bond_select_active_slave(bond); | 2564 | bond_select_active_slave(bond); |
2427 | write_unlock_bh(&bond->curr_slave_lock); | 2565 | write_unlock_bh(&bond->curr_slave_lock); |
2566 | unblock_netpoll_tx(); | ||
2428 | } | 2567 | } |
2429 | 2568 | ||
2430 | bond_set_carrier(bond); | 2569 | bond_set_carrier(bond); |
@@ -2442,6 +2581,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2442 | { | 2581 | { |
2443 | struct bonding *bond = container_of(work, struct bonding, | 2582 | struct bonding *bond = container_of(work, struct bonding, |
2444 | mii_work.work); | 2583 | mii_work.work); |
2584 | bool should_notify_peers = false; | ||
2445 | 2585 | ||
2446 | read_lock(&bond->lock); | 2586 | read_lock(&bond->lock); |
2447 | if (bond->kill_timers) | 2587 | if (bond->kill_timers) |
@@ -2450,17 +2590,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2450 | if (bond->slave_cnt == 0) | 2590 | if (bond->slave_cnt == 0) |
2451 | goto re_arm; | 2591 | goto re_arm; |
2452 | 2592 | ||
2453 | if (bond->send_grat_arp) { | 2593 | should_notify_peers = bond_should_notify_peers(bond); |
2454 | read_lock(&bond->curr_slave_lock); | ||
2455 | bond_send_gratuitous_arp(bond); | ||
2456 | read_unlock(&bond->curr_slave_lock); | ||
2457 | } | ||
2458 | |||
2459 | if (bond->send_unsol_na) { | ||
2460 | read_lock(&bond->curr_slave_lock); | ||
2461 | bond_send_unsolicited_na(bond); | ||
2462 | read_unlock(&bond->curr_slave_lock); | ||
2463 | } | ||
2464 | 2594 | ||
2465 | if (bond_miimon_inspect(bond)) { | 2595 | if (bond_miimon_inspect(bond)) { |
2466 | read_unlock(&bond->lock); | 2596 | read_unlock(&bond->lock); |
@@ -2480,6 +2610,12 @@ re_arm: | |||
2480 | msecs_to_jiffies(bond->params.miimon)); | 2610 | msecs_to_jiffies(bond->params.miimon)); |
2481 | out: | 2611 | out: |
2482 | read_unlock(&bond->lock); | 2612 | read_unlock(&bond->lock); |
2613 | |||
2614 | if (should_notify_peers) { | ||
2615 | rtnl_lock(); | ||
2616 | netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); | ||
2617 | rtnl_unlock(); | ||
2618 | } | ||
2483 | } | 2619 | } |
2484 | 2620 | ||
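bond_mii_monitor() above samples bond_should_notify_peers() while bond->lock is held for read, but raises NETDEV_NOTIFY_PEERS only after the unlock, because rtnl is a sleeping lock and may not be taken under that read lock. A pthread analogy of the decide-under-lock, act-after-unlock ordering; every name below is invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool link_changed = true;        /* pretend a failover just happened */

int main(void)
{
        bool should_notify;

        pthread_mutex_lock(&state_lock);
        should_notify = link_changed;   /* decision made under the lock       */
        link_changed = false;
        pthread_mutex_unlock(&state_lock);

        if (should_notify)              /* heavier work done outside the lock */
                printf("notify peers\n");
        return 0;
}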
2485 | static __be32 bond_glean_dev_ip(struct net_device *dev) | 2621 | static __be32 bond_glean_dev_ip(struct net_device *dev) |
@@ -2553,11 +2689,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ | |||
2553 | 2689 | ||
2554 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | 2690 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) |
2555 | { | 2691 | { |
2556 | int i, vlan_id, rv; | 2692 | int i, vlan_id; |
2557 | __be32 *targets = bond->params.arp_targets; | 2693 | __be32 *targets = bond->params.arp_targets; |
2558 | struct vlan_entry *vlan; | 2694 | struct vlan_entry *vlan; |
2559 | struct net_device *vlan_dev; | 2695 | struct net_device *vlan_dev; |
2560 | struct flowi fl; | ||
2561 | struct rtable *rt; | 2696 | struct rtable *rt; |
2562 | 2697 | ||
2563 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | 2698 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { |
@@ -2576,15 +2711,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2576 | * determine which VLAN interface would be used, so we | 2711 | * determine which VLAN interface would be used, so we |
2577 | * can tag the ARP with the proper VLAN tag. | 2712 | * can tag the ARP with the proper VLAN tag. |
2578 | */ | 2713 | */ |
2579 | memset(&fl, 0, sizeof(fl)); | 2714 | rt = ip_route_output(dev_net(bond->dev), targets[i], 0, |
2580 | fl.fl4_dst = targets[i]; | 2715 | RTO_ONLINK, 0); |
2581 | fl.fl4_tos = RTO_ONLINK; | 2716 | if (IS_ERR(rt)) { |
2582 | |||
2583 | rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl); | ||
2584 | if (rv) { | ||
2585 | if (net_ratelimit()) { | 2717 | if (net_ratelimit()) { |
2586 | pr_warning("%s: no route to arp_ip_target %pI4\n", | 2718 | pr_warning("%s: no route to arp_ip_target %pI4\n", |
2587 | bond->dev->name, &fl.fl4_dst); | 2719 | bond->dev->name, &targets[i]); |
2588 | } | 2720 | } |
2589 | continue; | 2721 | continue; |
2590 | } | 2722 | } |
@@ -2620,51 +2752,13 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2620 | 2752 | ||
2621 | if (net_ratelimit()) { | 2753 | if (net_ratelimit()) { |
2622 | pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", | 2754 | pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", |
2623 | bond->dev->name, &fl.fl4_dst, | 2755 | bond->dev->name, &targets[i], |
2624 | rt->dst.dev ? rt->dst.dev->name : "NULL"); | 2756 | rt->dst.dev ? rt->dst.dev->name : "NULL"); |
2625 | } | 2757 | } |
2626 | ip_rt_put(rt); | 2758 | ip_rt_put(rt); |
2627 | } | 2759 | } |
2628 | } | 2760 | } |
2629 | 2761 | ||
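The route lookup in bond_arp_send_all() above now gets an error-encoding pointer back from ip_route_output() and tests it with IS_ERR(), replacing the old int return plus struct flowi output parameter. A small user-space copy of that error-pointer idiom; lookup_route() is a stand-in, not a kernel API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup_route(int have_route)
{
        static int dummy_route;

        return have_route ? (void *)&dummy_route : ERR_PTR(-ENETUNREACH);
}

int main(void)
{
        void *rt = lookup_route(0);

        if (IS_ERR(rt))
                printf("no route: errno %ld\n", -PTR_ERR(rt));
        return 0;
}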
2630 | /* | ||
2631 | * Kick out a gratuitous ARP for an IP on the bonding master plus one | ||
2632 | * for each VLAN above us. | ||
2633 | * | ||
2634 | * Caller must hold curr_slave_lock for read or better | ||
2635 | */ | ||
2636 | static void bond_send_gratuitous_arp(struct bonding *bond) | ||
2637 | { | ||
2638 | struct slave *slave = bond->curr_active_slave; | ||
2639 | struct vlan_entry *vlan; | ||
2640 | struct net_device *vlan_dev; | ||
2641 | |||
2642 | pr_debug("bond_send_grat_arp: bond %s slave %s\n", | ||
2643 | bond->dev->name, slave ? slave->dev->name : "NULL"); | ||
2644 | |||
2645 | if (!slave || !bond->send_grat_arp || | ||
2646 | test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) | ||
2647 | return; | ||
2648 | |||
2649 | bond->send_grat_arp--; | ||
2650 | |||
2651 | if (bond->master_ip) { | ||
2652 | bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip, | ||
2653 | bond->master_ip, 0); | ||
2654 | } | ||
2655 | |||
2656 | if (!bond->vlgrp) | ||
2657 | return; | ||
2658 | |||
2659 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | ||
2660 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); | ||
2661 | if (vlan->vlan_ip) { | ||
2662 | bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, | ||
2663 | vlan->vlan_ip, vlan->vlan_id); | ||
2664 | } | ||
2665 | } | ||
2666 | } | ||
2667 | |||
2668 | static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) | 2762 | static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) |
2669 | { | 2763 | { |
2670 | int i; | 2764 | int i; |
@@ -2682,44 +2776,26 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 | |||
2682 | } | 2776 | } |
2683 | } | 2777 | } |
2684 | 2778 | ||
2685 | static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 2779 | static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, |
2780 | struct slave *slave) | ||
2686 | { | 2781 | { |
2687 | struct arphdr *arp; | 2782 | struct arphdr *arp; |
2688 | struct slave *slave; | ||
2689 | struct bonding *bond; | ||
2690 | unsigned char *arp_ptr; | 2783 | unsigned char *arp_ptr; |
2691 | __be32 sip, tip; | 2784 | __be32 sip, tip; |
2692 | 2785 | ||
2693 | if (dev->priv_flags & IFF_802_1Q_VLAN) { | 2786 | if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) |
2694 | /* | 2787 | return; |
2695 | * When using VLANS and bonding, dev and oriv_dev may be | ||
2696 | * incorrect if the physical interface supports VLAN | ||
2697 | * acceleration. With this change ARP validation now | ||
2698 | * works for hosts only reachable on the VLAN interface. | ||
2699 | */ | ||
2700 | dev = vlan_dev_real_dev(dev); | ||
2701 | orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif); | ||
2702 | } | ||
2703 | |||
2704 | if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) | ||
2705 | goto out; | ||
2706 | 2788 | ||
2707 | bond = netdev_priv(dev); | ||
2708 | read_lock(&bond->lock); | 2789 | read_lock(&bond->lock); |
2709 | 2790 | ||
2710 | pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n", | 2791 | pr_debug("bond_arp_rcv: bond %s skb->dev %s\n", |
2711 | bond->dev->name, skb->dev ? skb->dev->name : "NULL", | 2792 | bond->dev->name, skb->dev->name); |
2712 | orig_dev ? orig_dev->name : "NULL"); | ||
2713 | 2793 | ||
2714 | slave = bond_get_slave_by_dev(bond, orig_dev); | 2794 | if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) |
2715 | if (!slave || !slave_do_arp_validate(bond, slave)) | ||
2716 | goto out_unlock; | ||
2717 | |||
2718 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) | ||
2719 | goto out_unlock; | 2795 | goto out_unlock; |
2720 | 2796 | ||
2721 | arp = arp_hdr(skb); | 2797 | arp = arp_hdr(skb); |
2722 | if (arp->ar_hln != dev->addr_len || | 2798 | if (arp->ar_hln != bond->dev->addr_len || |
2723 | skb->pkt_type == PACKET_OTHERHOST || | 2799 | skb->pkt_type == PACKET_OTHERHOST || |
2724 | skb->pkt_type == PACKET_LOOPBACK || | 2800 | skb->pkt_type == PACKET_LOOPBACK || |
2725 | arp->ar_hrd != htons(ARPHRD_ETHER) || | 2801 | arp->ar_hrd != htons(ARPHRD_ETHER) || |
@@ -2728,13 +2804,13 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2728 | goto out_unlock; | 2804 | goto out_unlock; |
2729 | 2805 | ||
2730 | arp_ptr = (unsigned char *)(arp + 1); | 2806 | arp_ptr = (unsigned char *)(arp + 1); |
2731 | arp_ptr += dev->addr_len; | 2807 | arp_ptr += bond->dev->addr_len; |
2732 | memcpy(&sip, arp_ptr, 4); | 2808 | memcpy(&sip, arp_ptr, 4); |
2733 | arp_ptr += 4 + dev->addr_len; | 2809 | arp_ptr += 4 + bond->dev->addr_len; |
2734 | memcpy(&tip, arp_ptr, 4); | 2810 | memcpy(&tip, arp_ptr, 4); |
2735 | 2811 | ||
2736 | pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", | 2812 | pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", |
2737 | bond->dev->name, slave->dev->name, slave->state, | 2813 | bond->dev->name, slave->dev->name, bond_slave_state(slave), |
2738 | bond->params.arp_validate, slave_do_arp_validate(bond, slave), | 2814 | bond->params.arp_validate, slave_do_arp_validate(bond, slave), |
2739 | &sip, &tip); | 2815 | &sip, &tip); |
2740 | 2816 | ||
@@ -2746,16 +2822,13 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2746 | * the active, through one switch, the router, then the other | 2822 | * the active, through one switch, the router, then the other |
2747 | * switch before reaching the backup. | 2823 | * switch before reaching the backup. |
2748 | */ | 2824 | */ |
2749 | if (slave->state == BOND_STATE_ACTIVE) | 2825 | if (bond_is_active_slave(slave)) |
2750 | bond_validate_arp(bond, slave, sip, tip); | 2826 | bond_validate_arp(bond, slave, sip, tip); |
2751 | else | 2827 | else |
2752 | bond_validate_arp(bond, slave, tip, sip); | 2828 | bond_validate_arp(bond, slave, tip, sip); |
2753 | 2829 | ||
2754 | out_unlock: | 2830 | out_unlock: |
2755 | read_unlock(&bond->lock); | 2831 | read_unlock(&bond->lock); |
2756 | out: | ||
2757 | dev_kfree_skb(skb); | ||
2758 | return NET_RX_SUCCESS; | ||
2759 | } | 2832 | } |
2760 | 2833 | ||
2761 | /* | 2834 | /* |
@@ -2808,7 +2881,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2808 | slave->dev->last_rx + delta_in_ticks)) { | 2881 | slave->dev->last_rx + delta_in_ticks)) { |
2809 | 2882 | ||
2810 | slave->link = BOND_LINK_UP; | 2883 | slave->link = BOND_LINK_UP; |
2811 | slave->state = BOND_STATE_ACTIVE; | 2884 | bond_set_active_slave(slave); |
2812 | 2885 | ||
2813 | /* primary_slave has no meaning in round-robin | 2886 | /* primary_slave has no meaning in round-robin |
2814 | * mode. the window of a slave being up and | 2887 | * mode. the window of a slave being up and |
@@ -2841,7 +2914,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2841 | slave->dev->last_rx + 2 * delta_in_ticks)) { | 2914 | slave->dev->last_rx + 2 * delta_in_ticks)) { |
2842 | 2915 | ||
2843 | slave->link = BOND_LINK_DOWN; | 2916 | slave->link = BOND_LINK_DOWN; |
2844 | slave->state = BOND_STATE_BACKUP; | 2917 | bond_set_backup_slave(slave); |
2845 | 2918 | ||
2846 | if (slave->link_failure_count < UINT_MAX) | 2919 | if (slave->link_failure_count < UINT_MAX) |
2847 | slave->link_failure_count++; | 2920 | slave->link_failure_count++; |
@@ -2867,11 +2940,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2867 | } | 2940 | } |
2868 | 2941 | ||
2869 | if (do_failover) { | 2942 | if (do_failover) { |
2943 | block_netpoll_tx(); | ||
2870 | write_lock_bh(&bond->curr_slave_lock); | 2944 | write_lock_bh(&bond->curr_slave_lock); |
2871 | 2945 | ||
2872 | bond_select_active_slave(bond); | 2946 | bond_select_active_slave(bond); |
2873 | 2947 | ||
2874 | write_unlock_bh(&bond->curr_slave_lock); | 2948 | write_unlock_bh(&bond->curr_slave_lock); |
2949 | unblock_netpoll_tx(); | ||
2875 | } | 2950 | } |
2876 | 2951 | ||
2877 | re_arm: | 2952 | re_arm: |
@@ -2933,7 +3008,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2933 | * gives each slave a chance to tx/rx traffic | 3008 | * gives each slave a chance to tx/rx traffic |
2934 | * before being taken out | 3009 | * before being taken out |
2935 | */ | 3010 | */ |
2936 | if (slave->state == BOND_STATE_BACKUP && | 3011 | if (!bond_is_active_slave(slave) && |
2937 | !bond->current_arp_slave && | 3012 | !bond->current_arp_slave && |
2938 | !time_in_range(jiffies, | 3013 | !time_in_range(jiffies, |
2939 | slave_last_rx(bond, slave) - delta_in_ticks, | 3014 | slave_last_rx(bond, slave) - delta_in_ticks, |
@@ -2950,7 +3025,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) | |||
2950 | * the bond has an IP address) | 3025 | * the bond has an IP address) |
2951 | */ | 3026 | */ |
2952 | trans_start = dev_trans_start(slave->dev); | 3027 | trans_start = dev_trans_start(slave->dev); |
2953 | if ((slave->state == BOND_STATE_ACTIVE) && | 3028 | if (bond_is_active_slave(slave) && |
2954 | (!time_in_range(jiffies, | 3029 | (!time_in_range(jiffies, |
2955 | trans_start - delta_in_ticks, | 3030 | trans_start - delta_in_ticks, |
2956 | trans_start + 2 * delta_in_ticks) || | 3031 | trans_start + 2 * delta_in_ticks) || |
@@ -3030,9 +3105,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) | |||
3030 | 3105 | ||
3031 | do_failover: | 3106 | do_failover: |
3032 | ASSERT_RTNL(); | 3107 | ASSERT_RTNL(); |
3108 | block_netpoll_tx(); | ||
3033 | write_lock_bh(&bond->curr_slave_lock); | 3109 | write_lock_bh(&bond->curr_slave_lock); |
3034 | bond_select_active_slave(bond); | 3110 | bond_select_active_slave(bond); |
3035 | write_unlock_bh(&bond->curr_slave_lock); | 3111 | write_unlock_bh(&bond->curr_slave_lock); |
3112 | unblock_netpoll_tx(); | ||
3036 | } | 3113 | } |
3037 | 3114 | ||
3038 | bond_set_carrier(bond); | 3115 | bond_set_carrier(bond); |
@@ -3111,6 +3188,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3111 | { | 3188 | { |
3112 | struct bonding *bond = container_of(work, struct bonding, | 3189 | struct bonding *bond = container_of(work, struct bonding, |
3113 | arp_work.work); | 3190 | arp_work.work); |
3191 | bool should_notify_peers = false; | ||
3114 | int delta_in_ticks; | 3192 | int delta_in_ticks; |
3115 | 3193 | ||
3116 | read_lock(&bond->lock); | 3194 | read_lock(&bond->lock); |
@@ -3123,17 +3201,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3123 | if (bond->slave_cnt == 0) | 3201 | if (bond->slave_cnt == 0) |
3124 | goto re_arm; | 3202 | goto re_arm; |
3125 | 3203 | ||
3126 | if (bond->send_grat_arp) { | 3204 | should_notify_peers = bond_should_notify_peers(bond); |
3127 | read_lock(&bond->curr_slave_lock); | ||
3128 | bond_send_gratuitous_arp(bond); | ||
3129 | read_unlock(&bond->curr_slave_lock); | ||
3130 | } | ||
3131 | |||
3132 | if (bond->send_unsol_na) { | ||
3133 | read_lock(&bond->curr_slave_lock); | ||
3134 | bond_send_unsolicited_na(bond); | ||
3135 | read_unlock(&bond->curr_slave_lock); | ||
3136 | } | ||
3137 | 3205 | ||
3138 | if (bond_ab_arp_inspect(bond, delta_in_ticks)) { | 3206 | if (bond_ab_arp_inspect(bond, delta_in_ticks)) { |
3139 | read_unlock(&bond->lock); | 3207 | read_unlock(&bond->lock); |
@@ -3154,299 +3222,14 @@ re_arm: | |||
3154 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3222 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3155 | out: | 3223 | out: |
3156 | read_unlock(&bond->lock); | 3224 | read_unlock(&bond->lock); |
3157 | } | ||
3158 | |||
3159 | /*------------------------------ proc/seq_file-------------------------------*/ | ||
3160 | |||
3161 | #ifdef CONFIG_PROC_FS | ||
3162 | |||
3163 | static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) | ||
3164 | __acquires(&dev_base_lock) | ||
3165 | __acquires(&bond->lock) | ||
3166 | { | ||
3167 | struct bonding *bond = seq->private; | ||
3168 | loff_t off = 0; | ||
3169 | struct slave *slave; | ||
3170 | int i; | ||
3171 | |||
3172 | /* make sure the bond won't be taken away */ | ||
3173 | read_lock(&dev_base_lock); | ||
3174 | read_lock(&bond->lock); | ||
3175 | |||
3176 | if (*pos == 0) | ||
3177 | return SEQ_START_TOKEN; | ||
3178 | |||
3179 | bond_for_each_slave(bond, slave, i) { | ||
3180 | if (++off == *pos) | ||
3181 | return slave; | ||
3182 | } | ||
3183 | |||
3184 | return NULL; | ||
3185 | } | ||
3186 | |||
3187 | static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
3188 | { | ||
3189 | struct bonding *bond = seq->private; | ||
3190 | struct slave *slave = v; | ||
3191 | |||
3192 | ++*pos; | ||
3193 | if (v == SEQ_START_TOKEN) | ||
3194 | return bond->first_slave; | ||
3195 | |||
3196 | slave = slave->next; | ||
3197 | |||
3198 | return (slave == bond->first_slave) ? NULL : slave; | ||
3199 | } | ||
3200 | |||
3201 | static void bond_info_seq_stop(struct seq_file *seq, void *v) | ||
3202 | __releases(&bond->lock) | ||
3203 | __releases(&dev_base_lock) | ||
3204 | { | ||
3205 | struct bonding *bond = seq->private; | ||
3206 | |||
3207 | read_unlock(&bond->lock); | ||
3208 | read_unlock(&dev_base_lock); | ||
3209 | } | ||
3210 | |||
3211 | static void bond_info_show_master(struct seq_file *seq) | ||
3212 | { | ||
3213 | struct bonding *bond = seq->private; | ||
3214 | struct slave *curr; | ||
3215 | int i; | ||
3216 | |||
3217 | read_lock(&bond->curr_slave_lock); | ||
3218 | curr = bond->curr_active_slave; | ||
3219 | read_unlock(&bond->curr_slave_lock); | ||
3220 | |||
3221 | seq_printf(seq, "Bonding Mode: %s", | ||
3222 | bond_mode_name(bond->params.mode)); | ||
3223 | |||
3224 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && | ||
3225 | bond->params.fail_over_mac) | ||
3226 | seq_printf(seq, " (fail_over_mac %s)", | ||
3227 | fail_over_mac_tbl[bond->params.fail_over_mac].modename); | ||
3228 | |||
3229 | seq_printf(seq, "\n"); | ||
3230 | |||
3231 | if (bond->params.mode == BOND_MODE_XOR || | ||
3232 | bond->params.mode == BOND_MODE_8023AD) { | ||
3233 | seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", | ||
3234 | xmit_hashtype_tbl[bond->params.xmit_policy].modename, | ||
3235 | bond->params.xmit_policy); | ||
3236 | } | ||
3237 | |||
3238 | if (USES_PRIMARY(bond->params.mode)) { | ||
3239 | seq_printf(seq, "Primary Slave: %s", | ||
3240 | (bond->primary_slave) ? | ||
3241 | bond->primary_slave->dev->name : "None"); | ||
3242 | if (bond->primary_slave) | ||
3243 | seq_printf(seq, " (primary_reselect %s)", | ||
3244 | pri_reselect_tbl[bond->params.primary_reselect].modename); | ||
3245 | |||
3246 | seq_printf(seq, "\nCurrently Active Slave: %s\n", | ||
3247 | (curr) ? curr->dev->name : "None"); | ||
3248 | } | ||
3249 | |||
3250 | seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ? | ||
3251 | "up" : "down"); | ||
3252 | seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon); | ||
3253 | seq_printf(seq, "Up Delay (ms): %d\n", | ||
3254 | bond->params.updelay * bond->params.miimon); | ||
3255 | seq_printf(seq, "Down Delay (ms): %d\n", | ||
3256 | bond->params.downdelay * bond->params.miimon); | ||
3257 | |||
3258 | |||
3259 | /* ARP information */ | ||
3260 | if (bond->params.arp_interval > 0) { | ||
3261 | int printed = 0; | ||
3262 | seq_printf(seq, "ARP Polling Interval (ms): %d\n", | ||
3263 | bond->params.arp_interval); | ||
3264 | |||
3265 | seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); | ||
3266 | |||
3267 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | ||
3268 | if (!bond->params.arp_targets[i]) | ||
3269 | break; | ||
3270 | if (printed) | ||
3271 | seq_printf(seq, ","); | ||
3272 | seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); | ||
3273 | printed = 1; | ||
3274 | } | ||
3275 | seq_printf(seq, "\n"); | ||
3276 | } | ||
3277 | |||
3278 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
3279 | struct ad_info ad_info; | ||
3280 | |||
3281 | seq_puts(seq, "\n802.3ad info\n"); | ||
3282 | seq_printf(seq, "LACP rate: %s\n", | ||
3283 | (bond->params.lacp_fast) ? "fast" : "slow"); | ||
3284 | seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", | ||
3285 | ad_select_tbl[bond->params.ad_select].modename); | ||
3286 | |||
3287 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) { | ||
3288 | seq_printf(seq, "bond %s has no active aggregator\n", | ||
3289 | bond->dev->name); | ||
3290 | } else { | ||
3291 | seq_printf(seq, "Active Aggregator Info:\n"); | ||
3292 | |||
3293 | seq_printf(seq, "\tAggregator ID: %d\n", | ||
3294 | ad_info.aggregator_id); | ||
3295 | seq_printf(seq, "\tNumber of ports: %d\n", | ||
3296 | ad_info.ports); | ||
3297 | seq_printf(seq, "\tActor Key: %d\n", | ||
3298 | ad_info.actor_key); | ||
3299 | seq_printf(seq, "\tPartner Key: %d\n", | ||
3300 | ad_info.partner_key); | ||
3301 | seq_printf(seq, "\tPartner Mac Address: %pM\n", | ||
3302 | ad_info.partner_system); | ||
3303 | } | ||
3304 | } | ||
3305 | } | ||
3306 | |||
3307 | static void bond_info_show_slave(struct seq_file *seq, | ||
3308 | const struct slave *slave) | ||
3309 | { | ||
3310 | struct bonding *bond = seq->private; | ||
3311 | |||
3312 | seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); | ||
3313 | seq_printf(seq, "MII Status: %s\n", | ||
3314 | (slave->link == BOND_LINK_UP) ? "up" : "down"); | ||
3315 | seq_printf(seq, "Link Failure Count: %u\n", | ||
3316 | slave->link_failure_count); | ||
3317 | |||
3318 | seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); | ||
3319 | |||
3320 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
3321 | const struct aggregator *agg | ||
3322 | = SLAVE_AD_INFO(slave).port.aggregator; | ||
3323 | |||
3324 | if (agg) | ||
3325 | seq_printf(seq, "Aggregator ID: %d\n", | ||
3326 | agg->aggregator_identifier); | ||
3327 | else | ||
3328 | seq_puts(seq, "Aggregator ID: N/A\n"); | ||
3329 | } | ||
3330 | seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); | ||
3331 | } | ||
3332 | |||
3333 | static int bond_info_seq_show(struct seq_file *seq, void *v) | ||
3334 | { | ||
3335 | if (v == SEQ_START_TOKEN) { | ||
3336 | seq_printf(seq, "%s\n", version); | ||
3337 | bond_info_show_master(seq); | ||
3338 | } else | ||
3339 | bond_info_show_slave(seq, v); | ||
3340 | |||
3341 | return 0; | ||
3342 | } | ||
3343 | |||
3344 | static const struct seq_operations bond_info_seq_ops = { | ||
3345 | .start = bond_info_seq_start, | ||
3346 | .next = bond_info_seq_next, | ||
3347 | .stop = bond_info_seq_stop, | ||
3348 | .show = bond_info_seq_show, | ||
3349 | }; | ||
3350 | |||
3351 | static int bond_info_open(struct inode *inode, struct file *file) | ||
3352 | { | ||
3353 | struct seq_file *seq; | ||
3354 | struct proc_dir_entry *proc; | ||
3355 | int res; | ||
3356 | |||
3357 | res = seq_open(file, &bond_info_seq_ops); | ||
3358 | if (!res) { | ||
3359 | /* recover the pointer buried in proc_dir_entry data */ | ||
3360 | seq = file->private_data; | ||
3361 | proc = PDE(inode); | ||
3362 | seq->private = proc->data; | ||
3363 | } | ||
3364 | |||
3365 | return res; | ||
3366 | } | ||
3367 | |||
3368 | static const struct file_operations bond_info_fops = { | ||
3369 | .owner = THIS_MODULE, | ||
3370 | .open = bond_info_open, | ||
3371 | .read = seq_read, | ||
3372 | .llseek = seq_lseek, | ||
3373 | .release = seq_release, | ||
3374 | }; | ||
3375 | |||
3376 | static void bond_create_proc_entry(struct bonding *bond) | ||
3377 | { | ||
3378 | struct net_device *bond_dev = bond->dev; | ||
3379 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | ||
3380 | |||
3381 | if (bn->proc_dir) { | ||
3382 | bond->proc_entry = proc_create_data(bond_dev->name, | ||
3383 | S_IRUGO, bn->proc_dir, | ||
3384 | &bond_info_fops, bond); | ||
3385 | if (bond->proc_entry == NULL) | ||
3386 | pr_warning("Warning: Cannot create /proc/net/%s/%s\n", | ||
3387 | DRV_NAME, bond_dev->name); | ||
3388 | else | ||
3389 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); | ||
3390 | } | ||
3391 | } | ||
3392 | |||
3393 | static void bond_remove_proc_entry(struct bonding *bond) | ||
3394 | { | ||
3395 | struct net_device *bond_dev = bond->dev; | ||
3396 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | ||
3397 | |||
3398 | if (bn->proc_dir && bond->proc_entry) { | ||
3399 | remove_proc_entry(bond->proc_file_name, bn->proc_dir); | ||
3400 | memset(bond->proc_file_name, 0, IFNAMSIZ); | ||
3401 | bond->proc_entry = NULL; | ||
3402 | } | ||
3403 | } | ||
3404 | 3225 | ||
3405 | /* Create the bonding directory under /proc/net, if doesn't exist yet. | 3226 | if (should_notify_peers) { |
3406 | * Caller must hold rtnl_lock. | 3227 | rtnl_lock(); |
3407 | */ | 3228 | netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); |
3408 | static void __net_init bond_create_proc_dir(struct bond_net *bn) | 3229 | rtnl_unlock(); |
3409 | { | ||
3410 | if (!bn->proc_dir) { | ||
3411 | bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); | ||
3412 | if (!bn->proc_dir) | ||
3413 | pr_warning("Warning: cannot create /proc/net/%s\n", | ||
3414 | DRV_NAME); | ||
3415 | } | ||
3416 | } | ||
3417 | |||
3418 | /* Destroy the bonding directory under /proc/net, if empty. | ||
3419 | * Caller must hold rtnl_lock. | ||
3420 | */ | ||
3421 | static void __net_exit bond_destroy_proc_dir(struct bond_net *bn) | ||
3422 | { | ||
3423 | if (bn->proc_dir) { | ||
3424 | remove_proc_entry(DRV_NAME, bn->net->proc_net); | ||
3425 | bn->proc_dir = NULL; | ||
3426 | } | 3230 | } |
3427 | } | 3231 | } |
3428 | 3232 | ||
3429 | #else /* !CONFIG_PROC_FS */ | ||
3430 | |||
3431 | static void bond_create_proc_entry(struct bonding *bond) | ||
3432 | { | ||
3433 | } | ||
3434 | |||
3435 | static void bond_remove_proc_entry(struct bonding *bond) | ||
3436 | { | ||
3437 | } | ||
3438 | |||
3439 | static inline void bond_create_proc_dir(struct bond_net *bn) | ||
3440 | { | ||
3441 | } | ||
3442 | |||
3443 | static inline void bond_destroy_proc_dir(struct bond_net *bn) | ||
3444 | { | ||
3445 | } | ||
3446 | |||
3447 | #endif /* CONFIG_PROC_FS */ | ||
3448 | |||
3449 | |||
3450 | /*-------------------------- netdev event handling --------------------------*/ | 3233 | /*-------------------------- netdev event handling --------------------------*/ |
3451 | 3234 | ||
3452 | /* | 3235 | /* |
@@ -3457,6 +3240,8 @@ static int bond_event_changename(struct bonding *bond) | |||
3457 | bond_remove_proc_entry(bond); | 3240 | bond_remove_proc_entry(bond); |
3458 | bond_create_proc_entry(bond); | 3241 | bond_create_proc_entry(bond); |
3459 | 3242 | ||
3243 | bond_debug_reregister(bond); | ||
3244 | |||
3460 | return NOTIFY_DONE; | 3245 | return NOTIFY_DONE; |
3461 | } | 3246 | } |
3462 | 3247 | ||
@@ -3496,8 +3281,8 @@ static int bond_slave_netdev_event(unsigned long event, | |||
3496 | 3281 | ||
3497 | slave = bond_get_slave_by_dev(bond, slave_dev); | 3282 | slave = bond_get_slave_by_dev(bond, slave_dev); |
3498 | if (slave) { | 3283 | if (slave) { |
3499 | u16 old_speed = slave->speed; | 3284 | u32 old_speed = slave->speed; |
3500 | u16 old_duplex = slave->duplex; | 3285 | u8 old_duplex = slave->duplex; |
3501 | 3286 | ||
3502 | bond_update_speed_duplex(slave); | 3287 | bond_update_speed_duplex(slave); |
3503 | 3288 | ||
@@ -3639,48 +3424,6 @@ static struct notifier_block bond_inetaddr_notifier = { | |||
3639 | .notifier_call = bond_inetaddr_event, | 3424 | .notifier_call = bond_inetaddr_event, |
3640 | }; | 3425 | }; |
3641 | 3426 | ||
3642 | /*-------------------------- Packet type handling ---------------------------*/ | ||
3643 | |||
3644 | /* register to receive lacpdus on a bond */ | ||
3645 | static void bond_register_lacpdu(struct bonding *bond) | ||
3646 | { | ||
3647 | struct packet_type *pk_type = &(BOND_AD_INFO(bond).ad_pkt_type); | ||
3648 | |||
3649 | /* initialize packet type */ | ||
3650 | pk_type->type = PKT_TYPE_LACPDU; | ||
3651 | pk_type->dev = bond->dev; | ||
3652 | pk_type->func = bond_3ad_lacpdu_recv; | ||
3653 | |||
3654 | dev_add_pack(pk_type); | ||
3655 | } | ||
3656 | |||
3657 | /* unregister to receive lacpdus on a bond */ | ||
3658 | static void bond_unregister_lacpdu(struct bonding *bond) | ||
3659 | { | ||
3660 | dev_remove_pack(&(BOND_AD_INFO(bond).ad_pkt_type)); | ||
3661 | } | ||
3662 | |||
3663 | void bond_register_arp(struct bonding *bond) | ||
3664 | { | ||
3665 | struct packet_type *pt = &bond->arp_mon_pt; | ||
3666 | |||
3667 | if (pt->type) | ||
3668 | return; | ||
3669 | |||
3670 | pt->type = htons(ETH_P_ARP); | ||
3671 | pt->dev = bond->dev; | ||
3672 | pt->func = bond_arp_rcv; | ||
3673 | dev_add_pack(pt); | ||
3674 | } | ||
3675 | |||
3676 | void bond_unregister_arp(struct bonding *bond) | ||
3677 | { | ||
3678 | struct packet_type *pt = &bond->arp_mon_pt; | ||
3679 | |||
3680 | dev_remove_pack(pt); | ||
3681 | pt->type = 0; | ||
3682 | } | ||
3683 | |||
3684 | /*---------------------------- Hashing Policies -----------------------------*/ | 3427 | /*---------------------------- Hashing Policies -----------------------------*/ |
3685 | 3428 | ||
3686 | /* | 3429 | /* |
@@ -3744,6 +3487,8 @@ static int bond_open(struct net_device *bond_dev) | |||
3744 | 3487 | ||
3745 | bond->kill_timers = 0; | 3488 | bond->kill_timers = 0; |
3746 | 3489 | ||
3490 | INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); | ||
3491 | |||
3747 | if (bond_is_lb(bond)) { | 3492 | if (bond_is_lb(bond)) { |
3748 | /* bond_alb_initialize must be called before the timer | 3493 | /* bond_alb_initialize must be called before the timer |
3749 | * is started. | 3494 | * is started. |
@@ -3772,14 +3517,14 @@ static int bond_open(struct net_device *bond_dev) | |||
3772 | 3517 | ||
3773 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | 3518 | queue_delayed_work(bond->wq, &bond->arp_work, 0); |
3774 | if (bond->params.arp_validate) | 3519 | if (bond->params.arp_validate) |
3775 | bond_register_arp(bond); | 3520 | bond->recv_probe = bond_arp_rcv; |
3776 | } | 3521 | } |
3777 | 3522 | ||
3778 | if (bond->params.mode == BOND_MODE_8023AD) { | 3523 | if (bond->params.mode == BOND_MODE_8023AD) { |
3779 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); | 3524 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); |
3780 | queue_delayed_work(bond->wq, &bond->ad_work, 0); | 3525 | queue_delayed_work(bond->wq, &bond->ad_work, 0); |
3781 | /* register to receive LACPDUs */ | 3526 | /* register to receive LACPDUs */ |
3782 | bond_register_lacpdu(bond); | 3527 | bond->recv_probe = bond_3ad_lacpdu_recv; |
3783 | bond_3ad_initiate_agg_selection(bond, 1); | 3528 | bond_3ad_initiate_agg_selection(bond, 1); |
3784 | } | 3529 | } |
3785 | 3530 | ||
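The two per-protocol packet hooks are gone from the open path: instead of dev_add_pack()'ing a LACPDU handler and an ARP handler, bond_open() now points the single bond->recv_probe at the mode-appropriate receiver, and bond_close() clears it again. A minimal sketch of that selection, using only what the hunk above shows (the helper name is illustrative; the driver open-codes this inline):

        static void bond_pick_recv_probe(struct bonding *bond)
        {
                bond->recv_probe = NULL;

                if (bond->params.mode == BOND_MODE_8023AD)
                        bond->recv_probe = bond_3ad_lacpdu_recv;
                else if (bond->params.arp_interval && bond->params.arp_validate)
                        bond->recv_probe = bond_arp_rcv;
        }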
@@ -3790,18 +3535,9 @@ static int bond_close(struct net_device *bond_dev) | |||
3790 | { | 3535 | { |
3791 | struct bonding *bond = netdev_priv(bond_dev); | 3536 | struct bonding *bond = netdev_priv(bond_dev); |
3792 | 3537 | ||
3793 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
3794 | /* Unregister the receive of LACPDUs */ | ||
3795 | bond_unregister_lacpdu(bond); | ||
3796 | } | ||
3797 | |||
3798 | if (bond->params.arp_validate) | ||
3799 | bond_unregister_arp(bond); | ||
3800 | |||
3801 | write_lock_bh(&bond->lock); | 3538 | write_lock_bh(&bond->lock); |
3802 | 3539 | ||
3803 | bond->send_grat_arp = 0; | 3540 | bond->send_peer_notif = 0; |
3804 | bond->send_unsol_na = 0; | ||
3805 | 3541 | ||
3806 | /* signal timers not to re-arm */ | 3542 | /* signal timers not to re-arm */ |
3807 | bond->kill_timers = 1; | 3543 | bond->kill_timers = 1; |
@@ -3828,6 +3564,8 @@ static int bond_close(struct net_device *bond_dev) | |||
3828 | break; | 3564 | break; |
3829 | } | 3565 | } |
3830 | 3566 | ||
3567 | if (delayed_work_pending(&bond->mcast_work)) | ||
3568 | cancel_delayed_work(&bond->mcast_work); | ||
3831 | 3569 | ||
3832 | if (bond_is_lb(bond)) { | 3570 | if (bond_is_lb(bond)) { |
3833 | /* Must be called only after all | 3571 | /* Must be called only after all |
@@ -3835,6 +3573,7 @@ static int bond_close(struct net_device *bond_dev) | |||
3835 | */ | 3573 | */ |
3836 | bond_alb_deinitialize(bond); | 3574 | bond_alb_deinitialize(bond); |
3837 | } | 3575 | } |
3576 | bond->recv_probe = NULL; | ||
3838 | 3577 | ||
3839 | return 0; | 3578 | return 0; |
3840 | } | 3579 | } |
@@ -4258,10 +3997,6 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4258 | int i, slave_no, res = 1; | 3997 | int i, slave_no, res = 1; |
4259 | struct iphdr *iph = ip_hdr(skb); | 3998 | struct iphdr *iph = ip_hdr(skb); |
4260 | 3999 | ||
4261 | read_lock(&bond->lock); | ||
4262 | |||
4263 | if (!BOND_IS_OK(bond)) | ||
4264 | goto out; | ||
4265 | /* | 4000 | /* |
4266 | * Start with the curr_active_slave that joined the bond as the | 4001 | * Start with the curr_active_slave that joined the bond as the |
4267 | * default for sending IGMP traffic. For failover purposes one | 4002 | * default for sending IGMP traffic. For failover purposes one |
@@ -4297,7 +4032,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
4297 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4032 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4298 | if (IS_UP(slave->dev) && | 4033 | if (IS_UP(slave->dev) && |
4299 | (slave->link == BOND_LINK_UP) && | 4034 | (slave->link == BOND_LINK_UP) && |
4300 | (slave->state == BOND_STATE_ACTIVE)) { | 4035 | bond_is_active_slave(slave)) { |
4301 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4036 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4302 | break; | 4037 | break; |
4303 | } | 4038 | } |
@@ -4308,7 +4043,7 @@ out: | |||
4308 | /* no suitable interface, frame not sent */ | 4043 | /* no suitable interface, frame not sent */ |
4309 | dev_kfree_skb(skb); | 4044 | dev_kfree_skb(skb); |
4310 | } | 4045 | } |
4311 | read_unlock(&bond->lock); | 4046 | |
4312 | return NETDEV_TX_OK; | 4047 | return NETDEV_TX_OK; |
4313 | } | 4048 | } |
4314 | 4049 | ||
@@ -4322,24 +4057,18 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
4322 | struct bonding *bond = netdev_priv(bond_dev); | 4057 | struct bonding *bond = netdev_priv(bond_dev); |
4323 | int res = 1; | 4058 | int res = 1; |
4324 | 4059 | ||
4325 | read_lock(&bond->lock); | ||
4326 | read_lock(&bond->curr_slave_lock); | 4060 | read_lock(&bond->curr_slave_lock); |
4327 | 4061 | ||
4328 | if (!BOND_IS_OK(bond)) | 4062 | if (bond->curr_active_slave) |
4329 | goto out; | 4063 | res = bond_dev_queue_xmit(bond, skb, |
4330 | 4064 | bond->curr_active_slave->dev); | |
4331 | if (!bond->curr_active_slave) | ||
4332 | goto out; | ||
4333 | |||
4334 | res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); | ||
4335 | 4065 | ||
4336 | out: | ||
4337 | if (res) | 4066 | if (res) |
4338 | /* no suitable interface, frame not sent */ | 4067 | /* no suitable interface, frame not sent */ |
4339 | dev_kfree_skb(skb); | 4068 | dev_kfree_skb(skb); |
4340 | 4069 | ||
4341 | read_unlock(&bond->curr_slave_lock); | 4070 | read_unlock(&bond->curr_slave_lock); |
4342 | read_unlock(&bond->lock); | 4071 | |
4343 | return NETDEV_TX_OK; | 4072 | return NETDEV_TX_OK; |
4344 | } | 4073 | } |
4345 | 4074 | ||
@@ -4356,11 +4085,6 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | |||
4356 | int i; | 4085 | int i; |
4357 | int res = 1; | 4086 | int res = 1; |
4358 | 4087 | ||
4359 | read_lock(&bond->lock); | ||
4360 | |||
4361 | if (!BOND_IS_OK(bond)) | ||
4362 | goto out; | ||
4363 | |||
4364 | slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); | 4088 | slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); |
4365 | 4089 | ||
4366 | bond_for_each_slave(bond, slave, i) { | 4090 | bond_for_each_slave(bond, slave, i) { |
@@ -4374,18 +4098,17 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | |||
4374 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4098 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4375 | if (IS_UP(slave->dev) && | 4099 | if (IS_UP(slave->dev) && |
4376 | (slave->link == BOND_LINK_UP) && | 4100 | (slave->link == BOND_LINK_UP) && |
4377 | (slave->state == BOND_STATE_ACTIVE)) { | 4101 | bond_is_active_slave(slave)) { |
4378 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4102 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4379 | break; | 4103 | break; |
4380 | } | 4104 | } |
4381 | } | 4105 | } |
4382 | 4106 | ||
4383 | out: | ||
4384 | if (res) { | 4107 | if (res) { |
4385 | /* no suitable interface, frame not sent */ | 4108 | /* no suitable interface, frame not sent */ |
4386 | dev_kfree_skb(skb); | 4109 | dev_kfree_skb(skb); |
4387 | } | 4110 | } |
4388 | read_unlock(&bond->lock); | 4111 | |
4389 | return NETDEV_TX_OK; | 4112 | return NETDEV_TX_OK; |
4390 | } | 4113 | } |
4391 | 4114 | ||
@@ -4400,11 +4123,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) | |||
4400 | int i; | 4123 | int i; |
4401 | int res = 1; | 4124 | int res = 1; |
4402 | 4125 | ||
4403 | read_lock(&bond->lock); | ||
4404 | |||
4405 | if (!BOND_IS_OK(bond)) | ||
4406 | goto out; | ||
4407 | |||
4408 | read_lock(&bond->curr_slave_lock); | 4126 | read_lock(&bond->curr_slave_lock); |
4409 | start_at = bond->curr_active_slave; | 4127 | start_at = bond->curr_active_slave; |
4410 | read_unlock(&bond->curr_slave_lock); | 4128 | read_unlock(&bond->curr_slave_lock); |
@@ -4415,7 +4133,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) | |||
4415 | bond_for_each_slave_from(bond, slave, i, start_at) { | 4133 | bond_for_each_slave_from(bond, slave, i, start_at) { |
4416 | if (IS_UP(slave->dev) && | 4134 | if (IS_UP(slave->dev) && |
4417 | (slave->link == BOND_LINK_UP) && | 4135 | (slave->link == BOND_LINK_UP) && |
4418 | (slave->state == BOND_STATE_ACTIVE)) { | 4136 | bond_is_active_slave(slave)) { |
4419 | if (tx_dev) { | 4137 | if (tx_dev) { |
4420 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 4138 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
4421 | if (!skb2) { | 4139 | if (!skb2) { |
@@ -4443,7 +4161,6 @@ out: | |||
4443 | dev_kfree_skb(skb); | 4161 | dev_kfree_skb(skb); |
4444 | 4162 | ||
4445 | /* frame sent to all suitable interfaces */ | 4163 | /* frame sent to all suitable interfaces */ |
4446 | read_unlock(&bond->lock); | ||
4447 | return NETDEV_TX_OK; | 4164 | return NETDEV_TX_OK; |
4448 | } | 4165 | } |
4449 | 4166 | ||
@@ -4475,10 +4192,8 @@ static inline int bond_slave_override(struct bonding *bond, | |||
4475 | struct slave *slave = NULL; | 4192 | struct slave *slave = NULL; |
4476 | struct slave *check_slave; | 4193 | struct slave *check_slave; |
4477 | 4194 | ||
4478 | read_lock(&bond->lock); | 4195 | if (!skb->queue_mapping) |
4479 | 4196 | return 1; | |
4480 | if (!BOND_IS_OK(bond) || !skb->queue_mapping) | ||
4481 | goto out; | ||
4482 | 4197 | ||
4483 | /* Find out if any slaves have the same mapping as this skb. */ | 4198 | /* Find out if any slaves have the same mapping as this skb. */ |
4484 | bond_for_each_slave(bond, check_slave, i) { | 4199 | bond_for_each_slave(bond, check_slave, i) { |
@@ -4494,23 +4209,34 @@ static inline int bond_slave_override(struct bonding *bond, | |||
4494 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | 4209 | res = bond_dev_queue_xmit(bond, skb, slave->dev); |
4495 | } | 4210 | } |
4496 | 4211 | ||
4497 | out: | ||
4498 | read_unlock(&bond->lock); | ||
4499 | return res; | 4212 | return res; |
4500 | } | 4213 | } |
4501 | 4214 | ||
4215 | |||
4502 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | 4216 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) |
4503 | { | 4217 | { |
4504 | /* | 4218 | /* |
4505 | * This helper function exists to help dev_pick_tx get the correct | 4219 | * This helper function exists to help dev_pick_tx get the correct |
4506 | * destination queue. Using a helper function skips the a call to | 4220 | * destination queue. Using a helper function skips a call to |
4507 | * skb_tx_hash and will put the skbs in the queue we expect on their | 4221 | * skb_tx_hash and will put the skbs in the queue we expect on their |
4508 | * way down to the bonding driver. | 4222 | * way down to the bonding driver. |
4509 | */ | 4223 | */ |
4510 | return skb->queue_mapping; | 4224 | u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; |
4225 | |||
4226 | /* | ||
4227 | * Save the original txq to restore before passing to the driver | ||
4228 | */ | ||
4229 | bond_queue_mapping(skb) = skb->queue_mapping; | ||
4230 | |||
4231 | if (unlikely(txq >= dev->real_num_tx_queues)) { | ||
4232 | do { | ||
4233 | txq -= dev->real_num_tx_queues; | ||
4234 | } while (txq >= dev->real_num_tx_queues); | ||
4235 | } | ||
4236 | return txq; | ||
4511 | } | 4237 | } |
4512 | 4238 | ||
4513 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | 4239 | static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) |
4514 | { | 4240 | { |
4515 | struct bonding *bond = netdev_priv(dev); | 4241 | struct bonding *bond = netdev_priv(dev); |
4516 | 4242 | ||
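bond_select_queue() now derives the queue from the recorded rx queue (or 0), stashes the caller's original queue_mapping through the bond_queue_mapping() macro so it can be restored before the skb is passed on to the slave driver, and folds out-of-range values back into [0, real_num_tx_queues). A sketch of that folding step, assuming real_num_tx_queues is at least 1 (helper name is illustrative): a plain modulo gives the same result, the subtraction loop simply avoids a division in the common case where txq is already in range.

        static u16 bond_fold_txq(u16 txq, unsigned int real_num_tx_queues)
        {
                /* equivalent to: txq % real_num_tx_queues */
                while (txq >= real_num_tx_queues)
                        txq -= real_num_tx_queues;
                return txq;
        }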
@@ -4543,6 +4269,29 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4543 | } | 4269 | } |
4544 | } | 4270 | } |
4545 | 4271 | ||
4272 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
4273 | { | ||
4274 | struct bonding *bond = netdev_priv(dev); | ||
4275 | netdev_tx_t ret = NETDEV_TX_OK; | ||
4276 | |||
4277 | /* | ||
4278 | * If we risk deadlock from transmitting this in the | ||
4279 | * netpoll path, tell netpoll to queue the frame for later tx | ||
4280 | */ | ||
4281 | if (is_netpoll_tx_blocked(dev)) | ||
4282 | return NETDEV_TX_BUSY; | ||
4283 | |||
4284 | read_lock(&bond->lock); | ||
4285 | |||
4286 | if (bond->slave_cnt) | ||
4287 | ret = __bond_start_xmit(skb, dev); | ||
4288 | else | ||
4289 | dev_kfree_skb(skb); | ||
4290 | |||
4291 | read_unlock(&bond->lock); | ||
4292 | |||
4293 | return ret; | ||
4294 | } | ||
4546 | 4295 | ||
4547 | /* | 4296 | /* |
4548 | * set bond mode specific net device operations | 4297 | * set bond mode specific net device operations |
@@ -4562,11 +4311,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode) | |||
4562 | case BOND_MODE_BROADCAST: | 4311 | case BOND_MODE_BROADCAST: |
4563 | break; | 4312 | break; |
4564 | case BOND_MODE_8023AD: | 4313 | case BOND_MODE_8023AD: |
4565 | bond_set_master_3ad_flags(bond); | ||
4566 | bond_set_xmit_hash_policy(bond); | 4314 | bond_set_xmit_hash_policy(bond); |
4567 | break; | 4315 | break; |
4568 | case BOND_MODE_ALB: | 4316 | case BOND_MODE_ALB: |
4569 | bond_set_master_alb_flags(bond); | ||
4570 | /* FALLTHRU */ | 4317 | /* FALLTHRU */ |
4571 | case BOND_MODE_TLB: | 4318 | case BOND_MODE_TLB: |
4572 | break; | 4319 | break; |
@@ -4589,11 +4336,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, | |||
4589 | static const struct ethtool_ops bond_ethtool_ops = { | 4336 | static const struct ethtool_ops bond_ethtool_ops = { |
4590 | .get_drvinfo = bond_ethtool_get_drvinfo, | 4337 | .get_drvinfo = bond_ethtool_get_drvinfo, |
4591 | .get_link = ethtool_op_get_link, | 4338 | .get_link = ethtool_op_get_link, |
4592 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
4593 | .get_sg = ethtool_op_get_sg, | ||
4594 | .get_tso = ethtool_op_get_tso, | ||
4595 | .get_ufo = ethtool_op_get_ufo, | ||
4596 | .get_flags = ethtool_op_get_flags, | ||
4597 | }; | 4339 | }; |
4598 | 4340 | ||
4599 | static const struct net_device_ops bond_netdev_ops = { | 4341 | static const struct net_device_ops bond_netdev_ops = { |
@@ -4613,9 +4355,13 @@ static const struct net_device_ops bond_netdev_ops = { | |||
4613 | .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, | 4355 | .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, |
4614 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, | 4356 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, |
4615 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4357 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4358 | .ndo_netpoll_setup = bond_netpoll_setup, | ||
4616 | .ndo_netpoll_cleanup = bond_netpoll_cleanup, | 4359 | .ndo_netpoll_cleanup = bond_netpoll_cleanup, |
4617 | .ndo_poll_controller = bond_poll_controller, | 4360 | .ndo_poll_controller = bond_poll_controller, |
4618 | #endif | 4361 | #endif |
4362 | .ndo_add_slave = bond_enslave, | ||
4363 | .ndo_del_slave = bond_release, | ||
4364 | .ndo_fix_features = bond_fix_features, | ||
4619 | }; | 4365 | }; |
4620 | 4366 | ||
4621 | static void bond_destructor(struct net_device *bond_dev) | 4367 | static void bond_destructor(struct net_device *bond_dev) |
@@ -4654,9 +4400,6 @@ static void bond_setup(struct net_device *bond_dev) | |||
4654 | bond_dev->priv_flags |= IFF_BONDING; | 4400 | bond_dev->priv_flags |= IFF_BONDING; |
4655 | bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 4401 | bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
4656 | 4402 | ||
4657 | if (bond->params.arp_interval) | ||
4658 | bond_dev->priv_flags |= IFF_MASTER_ARPMON; | ||
4659 | |||
4660 | /* At first, we block adding VLANs. That's the only way to | 4403 | /* At first, we block adding VLANs. That's the only way to |
4661 | * prevent problems that occur when adding VLANs over an | 4404 | * prevent problems that occur when adding VLANs over an |
4662 | * empty bond. The block will be removed once non-challenged | 4405 | * empty bond. The block will be removed once non-challenged |
@@ -4674,10 +4417,14 @@ static void bond_setup(struct net_device *bond_dev) | |||
4674 | * when there are slaves that are not hw accel | 4417 | * when there are slaves that are not hw accel |
4675 | * capable | 4418 | * capable |
4676 | */ | 4419 | */ |
4677 | bond_dev->features |= (NETIF_F_HW_VLAN_TX | | ||
4678 | NETIF_F_HW_VLAN_RX | | ||
4679 | NETIF_F_HW_VLAN_FILTER); | ||
4680 | 4420 | ||
4421 | bond_dev->hw_features = BOND_VLAN_FEATURES | | ||
4422 | NETIF_F_HW_VLAN_TX | | ||
4423 | NETIF_F_HW_VLAN_RX | | ||
4424 | NETIF_F_HW_VLAN_FILTER; | ||
4425 | |||
4426 | bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM); | ||
4427 | bond_dev->features |= bond_dev->hw_features; | ||
4681 | } | 4428 | } |
4682 | 4429 | ||
4683 | static void bond_work_cancel_all(struct bonding *bond) | 4430 | static void bond_work_cancel_all(struct bonding *bond) |
@@ -4699,6 +4446,9 @@ static void bond_work_cancel_all(struct bonding *bond) | |||
4699 | if (bond->params.mode == BOND_MODE_8023AD && | 4446 | if (bond->params.mode == BOND_MODE_8023AD && |
4700 | delayed_work_pending(&bond->ad_work)) | 4447 | delayed_work_pending(&bond->ad_work)) |
4701 | cancel_delayed_work(&bond->ad_work); | 4448 | cancel_delayed_work(&bond->ad_work); |
4449 | |||
4450 | if (delayed_work_pending(&bond->mcast_work)) | ||
4451 | cancel_delayed_work(&bond->mcast_work); | ||
4702 | } | 4452 | } |
4703 | 4453 | ||
4704 | /* | 4454 | /* |
@@ -4721,6 +4471,8 @@ static void bond_uninit(struct net_device *bond_dev) | |||
4721 | 4471 | ||
4722 | bond_remove_proc_entry(bond); | 4472 | bond_remove_proc_entry(bond); |
4723 | 4473 | ||
4474 | bond_debug_unregister(bond); | ||
4475 | |||
4724 | __hw_addr_flush(&bond->mc_list); | 4476 | __hw_addr_flush(&bond->mc_list); |
4725 | 4477 | ||
4726 | list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { | 4478 | list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { |
@@ -4856,16 +4608,10 @@ static int bond_check_params(struct bond_params *params) | |||
4856 | use_carrier = 1; | 4608 | use_carrier = 1; |
4857 | } | 4609 | } |
4858 | 4610 | ||
4859 | if (num_grat_arp < 0 || num_grat_arp > 255) { | 4611 | if (num_peer_notif < 0 || num_peer_notif > 255) { |
4860 | pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n", | 4612 | pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", |
4861 | num_grat_arp); | 4613 | num_peer_notif); |
4862 | num_grat_arp = 1; | 4614 | num_peer_notif = 1; |
4863 | } | ||
4864 | |||
4865 | if (num_unsol_na < 0 || num_unsol_na > 255) { | ||
4866 | pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", | ||
4867 | num_unsol_na); | ||
4868 | num_unsol_na = 1; | ||
4869 | } | 4615 | } |
4870 | 4616 | ||
4871 | /* reset values for 802.3ad */ | 4617 | /* reset values for 802.3ad */ |
@@ -4891,6 +4637,13 @@ static int bond_check_params(struct bond_params *params) | |||
4891 | all_slaves_active = 0; | 4637 | all_slaves_active = 0; |
4892 | } | 4638 | } |
4893 | 4639 | ||
4640 | if (resend_igmp < 0 || resend_igmp > 255) { | ||
4641 | pr_warning("Warning: resend_igmp (%d) should be between " | ||
4642 | "0 and 255, resetting to %d\n", | ||
4643 | resend_igmp, BOND_DEFAULT_RESEND_IGMP); | ||
4644 | resend_igmp = BOND_DEFAULT_RESEND_IGMP; | ||
4645 | } | ||
4646 | |||
4894 | /* reset values for TLB/ALB */ | 4647 | /* reset values for TLB/ALB */ |
4895 | if ((bond_mode == BOND_MODE_TLB) || | 4648 | if ((bond_mode == BOND_MODE_TLB) || |
4896 | (bond_mode == BOND_MODE_ALB)) { | 4649 | (bond_mode == BOND_MODE_ALB)) { |
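resend_igmp joins the other module parameters in being range-checked at load time, falling back to BOND_DEFAULT_RESEND_IGMP rather than failing the load. The driver open-codes each of these checks; their common shape, as a hedged sketch (the helper name is illustrative, not part of the driver):

        static int bond_param_in_range(int val, int min, int max, int def,
                                       const char *name)
        {
                if (val >= min && val <= max)
                        return val;
                pr_warning("Warning: %s (%d) not in range %d-%d, using %d\n",
                           name, val, min, max, def);
                return def;
        }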
@@ -5050,8 +4803,7 @@ static int bond_check_params(struct bond_params *params) | |||
5050 | params->mode = bond_mode; | 4803 | params->mode = bond_mode; |
5051 | params->xmit_policy = xmit_hashtype; | 4804 | params->xmit_policy = xmit_hashtype; |
5052 | params->miimon = miimon; | 4805 | params->miimon = miimon; |
5053 | params->num_grat_arp = num_grat_arp; | 4806 | params->num_peer_notif = num_peer_notif; |
5054 | params->num_unsol_na = num_unsol_na; | ||
5055 | params->arp_interval = arp_interval; | 4807 | params->arp_interval = arp_interval; |
5056 | params->arp_validate = arp_validate_value; | 4808 | params->arp_validate = arp_validate_value; |
5057 | params->updelay = updelay; | 4809 | params->updelay = updelay; |
@@ -5063,6 +4815,7 @@ static int bond_check_params(struct bond_params *params) | |||
5063 | params->fail_over_mac = fail_over_mac_value; | 4815 | params->fail_over_mac = fail_over_mac_value; |
5064 | params->tx_queues = tx_queues; | 4816 | params->tx_queues = tx_queues; |
5065 | params->all_slaves_active = all_slaves_active; | 4817 | params->all_slaves_active = all_slaves_active; |
4818 | params->resend_igmp = resend_igmp; | ||
5066 | 4819 | ||
5067 | if (primary) { | 4820 | if (primary) { |
5068 | strncpy(params->primary, primary, IFNAMSIZ); | 4821 | strncpy(params->primary, primary, IFNAMSIZ); |
@@ -5099,22 +4852,32 @@ static int bond_init(struct net_device *bond_dev) | |||
5099 | { | 4852 | { |
5100 | struct bonding *bond = netdev_priv(bond_dev); | 4853 | struct bonding *bond = netdev_priv(bond_dev); |
5101 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | 4854 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); |
4855 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | ||
5102 | 4856 | ||
5103 | pr_debug("Begin bond_init for %s\n", bond_dev->name); | 4857 | pr_debug("Begin bond_init for %s\n", bond_dev->name); |
5104 | 4858 | ||
4859 | /* | ||
4860 | * Initialize locks that may be required during | ||
4861 | * en/deslave operations. All of the bond_open work | ||
4862 | * (of which this is part) should really be moved to | ||
4863 | * a phase prior to dev_open | ||
4864 | */ | ||
4865 | spin_lock_init(&(bond_info->tx_hashtbl_lock)); | ||
4866 | spin_lock_init(&(bond_info->rx_hashtbl_lock)); | ||
4867 | |||
5105 | bond->wq = create_singlethread_workqueue(bond_dev->name); | 4868 | bond->wq = create_singlethread_workqueue(bond_dev->name); |
5106 | if (!bond->wq) | 4869 | if (!bond->wq) |
5107 | return -ENOMEM; | 4870 | return -ENOMEM; |
5108 | 4871 | ||
5109 | bond_set_lockdep_class(bond_dev); | 4872 | bond_set_lockdep_class(bond_dev); |
5110 | 4873 | ||
5111 | netif_carrier_off(bond_dev); | ||
5112 | |||
5113 | bond_create_proc_entry(bond); | 4874 | bond_create_proc_entry(bond); |
5114 | list_add_tail(&bond->bond_list, &bn->dev_list); | 4875 | list_add_tail(&bond->bond_list, &bn->dev_list); |
5115 | 4876 | ||
5116 | bond_prepare_sysfs_group(bond); | 4877 | bond_prepare_sysfs_group(bond); |
5117 | 4878 | ||
4879 | bond_debug_register(bond); | ||
4880 | |||
5118 | __hw_addr_init(&bond->mc_list); | 4881 | __hw_addr_init(&bond->mc_list); |
5119 | return 0; | 4882 | return 0; |
5120 | } | 4883 | } |
@@ -5149,8 +4912,9 @@ int bond_create(struct net *net, const char *name) | |||
5149 | 4912 | ||
5150 | rtnl_lock(); | 4913 | rtnl_lock(); |
5151 | 4914 | ||
5152 | bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "", | 4915 | bond_dev = alloc_netdev_mq(sizeof(struct bonding), |
5153 | bond_setup, tx_queues); | 4916 | name ? name : "bond%d", |
4917 | bond_setup, tx_queues); | ||
5154 | if (!bond_dev) { | 4918 | if (!bond_dev) { |
5155 | pr_err("%s: eek! can't alloc netdev!\n", name); | 4919 | pr_err("%s: eek! can't alloc netdev!\n", name); |
5156 | rtnl_unlock(); | 4920 | rtnl_unlock(); |
@@ -5160,24 +4924,10 @@ int bond_create(struct net *net, const char *name) | |||
5160 | dev_net_set(bond_dev, net); | 4924 | dev_net_set(bond_dev, net); |
5161 | bond_dev->rtnl_link_ops = &bond_link_ops; | 4925 | bond_dev->rtnl_link_ops = &bond_link_ops; |
5162 | 4926 | ||
5163 | if (!name) { | ||
5164 | res = dev_alloc_name(bond_dev, "bond%d"); | ||
5165 | if (res < 0) | ||
5166 | goto out; | ||
5167 | } else { | ||
5168 | /* | ||
5169 | * If we're given a name to register | ||
5170 | * we need to ensure that its not already | ||
5171 | * registered | ||
5172 | */ | ||
5173 | res = -EEXIST; | ||
5174 | if (__dev_get_by_name(net, name) != NULL) | ||
5175 | goto out; | ||
5176 | } | ||
5177 | |||
5178 | res = register_netdevice(bond_dev); | 4927 | res = register_netdevice(bond_dev); |
5179 | 4928 | ||
5180 | out: | 4929 | netif_carrier_off(bond_dev); |
4930 | |||
5181 | rtnl_unlock(); | 4931 | rtnl_unlock(); |
5182 | if (res < 0) | 4932 | if (res < 0) |
5183 | bond_destructor(bond_dev); | 4933 | bond_destructor(bond_dev); |
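bond_create() now hands the "bond%d" template straight to alloc_netdev_mq() and lets registration resolve it, which is why the manual dev_alloc_name()/__dev_get_by_name() branch and its explicit -EEXIST check disappear. This relies on the core behaviour of this kernel generation, where register_netdevice() expands "%d" names and returns -EEXIST for a name that is already taken (an assumption about the core, not shown in this diff); a condensed sketch of the resulting flow:

        bond_dev = alloc_netdev_mq(sizeof(struct bonding),
                                   name ? name : "bond%d",
                                   bond_setup, tx_queues);
        if (!bond_dev)
                return -ENOMEM;

        res = register_netdevice(bond_dev);     /* picks bond0, bond1, ...
                                                 * or fails with -EEXIST */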
@@ -5215,7 +4965,7 @@ static int __init bonding_init(void) | |||
5215 | int i; | 4965 | int i; |
5216 | int res; | 4966 | int res; |
5217 | 4967 | ||
5218 | pr_info("%s", version); | 4968 | pr_info("%s", bond_version); |
5219 | 4969 | ||
5220 | res = bond_check_params(&bonding_defaults); | 4970 | res = bond_check_params(&bonding_defaults); |
5221 | if (res) | 4971 | if (res) |
@@ -5229,6 +4979,8 @@ static int __init bonding_init(void) | |||
5229 | if (res) | 4979 | if (res) |
5230 | goto err_link; | 4980 | goto err_link; |
5231 | 4981 | ||
4982 | bond_create_debugfs(); | ||
4983 | |||
5232 | for (i = 0; i < max_bonds; i++) { | 4984 | for (i = 0; i < max_bonds; i++) { |
5233 | res = bond_create(&init_net, NULL); | 4985 | res = bond_create(&init_net, NULL); |
5234 | if (res) | 4986 | if (res) |
@@ -5241,7 +4993,6 @@ static int __init bonding_init(void) | |||
5241 | 4993 | ||
5242 | register_netdevice_notifier(&bond_netdev_notifier); | 4994 | register_netdevice_notifier(&bond_netdev_notifier); |
5243 | register_inetaddr_notifier(&bond_inetaddr_notifier); | 4995 | register_inetaddr_notifier(&bond_inetaddr_notifier); |
5244 | bond_register_ipv6_notifier(); | ||
5245 | out: | 4996 | out: |
5246 | return res; | 4997 | return res; |
5247 | err: | 4998 | err: |
@@ -5256,12 +5007,19 @@ static void __exit bonding_exit(void) | |||
5256 | { | 5007 | { |
5257 | unregister_netdevice_notifier(&bond_netdev_notifier); | 5008 | unregister_netdevice_notifier(&bond_netdev_notifier); |
5258 | unregister_inetaddr_notifier(&bond_inetaddr_notifier); | 5009 | unregister_inetaddr_notifier(&bond_inetaddr_notifier); |
5259 | bond_unregister_ipv6_notifier(); | ||
5260 | 5010 | ||
5261 | bond_destroy_sysfs(); | 5011 | bond_destroy_sysfs(); |
5012 | bond_destroy_debugfs(); | ||
5262 | 5013 | ||
5263 | rtnl_link_unregister(&bond_link_ops); | 5014 | rtnl_link_unregister(&bond_link_ops); |
5264 | unregister_pernet_subsys(&bond_net_ops); | 5015 | unregister_pernet_subsys(&bond_net_ops); |
5016 | |||
5017 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
5018 | /* | ||
5019 | * Make sure we don't have an imbalance on our netpoll blocking | ||
5020 | */ | ||
5021 | WARN_ON(atomic_read(&netpoll_block_tx)); | ||
5022 | #endif | ||
5265 | } | 5023 | } |
5266 | 5024 | ||
5267 | module_init(bonding_init); | 5025 | module_init(bonding_init); |
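The init/exit path also gains the debugfs lifecycle referenced throughout the series: bond_create_debugfs()/bond_destroy_debugfs() bracket the module, bond_debug_register()/bond_debug_unregister() bracket each bond, and bond_debug_reregister() follows a rename (see the changename hunk earlier). The implementations live in the new bond_debugfs.c from the diffstat, which is not part of this excerpt; the sketch below assumes the directory layout and the debug_dir field rather than quoting them:

        static struct dentry *bonding_debug_root;   /* e.g. <debugfs>/bonding */

        void bond_create_debugfs(void)
        {
                bonding_debug_root = debugfs_create_dir("bonding", NULL);
                if (!bonding_debug_root)
                        pr_warning("Warning: Cannot create bonding debugfs dir\n");
        }

        void bond_debug_register(struct bonding *bond)
        {
                if (!bonding_debug_root)
                        return;
                bond->debug_dir =
                        debugfs_create_dir(bond->dev->name, bonding_debug_root);
        }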
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c new file mode 100644 index 000000000000..c97307ddd1c9 --- /dev/null +++ b/drivers/net/bonding/bond_procfs.c | |||
@@ -0,0 +1,273 @@ | |||
1 | #include <linux/proc_fs.h> | ||
2 | #include <net/net_namespace.h> | ||
3 | #include <net/netns/generic.h> | ||
4 | #include "bonding.h" | ||
5 | |||
6 | |||
7 | static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) | ||
8 | __acquires(RCU) | ||
9 | __acquires(&bond->lock) | ||
10 | { | ||
11 | struct bonding *bond = seq->private; | ||
12 | loff_t off = 0; | ||
13 | struct slave *slave; | ||
14 | int i; | ||
15 | |||
16 | /* make sure the bond won't be taken away */ | ||
17 | rcu_read_lock(); | ||
18 | read_lock(&bond->lock); | ||
19 | |||
20 | if (*pos == 0) | ||
21 | return SEQ_START_TOKEN; | ||
22 | |||
23 | bond_for_each_slave(bond, slave, i) { | ||
24 | if (++off == *pos) | ||
25 | return slave; | ||
26 | } | ||
27 | |||
28 | return NULL; | ||
29 | } | ||
30 | |||
31 | static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
32 | { | ||
33 | struct bonding *bond = seq->private; | ||
34 | struct slave *slave = v; | ||
35 | |||
36 | ++*pos; | ||
37 | if (v == SEQ_START_TOKEN) | ||
38 | return bond->first_slave; | ||
39 | |||
40 | slave = slave->next; | ||
41 | |||
42 | return (slave == bond->first_slave) ? NULL : slave; | ||
43 | } | ||
44 | |||
45 | static void bond_info_seq_stop(struct seq_file *seq, void *v) | ||
46 | __releases(&bond->lock) | ||
47 | __releases(RCU) | ||
48 | { | ||
49 | struct bonding *bond = seq->private; | ||
50 | |||
51 | read_unlock(&bond->lock); | ||
52 | rcu_read_unlock(); | ||
53 | } | ||
54 | |||
55 | static void bond_info_show_master(struct seq_file *seq) | ||
56 | { | ||
57 | struct bonding *bond = seq->private; | ||
58 | struct slave *curr; | ||
59 | int i; | ||
60 | |||
61 | read_lock(&bond->curr_slave_lock); | ||
62 | curr = bond->curr_active_slave; | ||
63 | read_unlock(&bond->curr_slave_lock); | ||
64 | |||
65 | seq_printf(seq, "Bonding Mode: %s", | ||
66 | bond_mode_name(bond->params.mode)); | ||
67 | |||
68 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && | ||
69 | bond->params.fail_over_mac) | ||
70 | seq_printf(seq, " (fail_over_mac %s)", | ||
71 | fail_over_mac_tbl[bond->params.fail_over_mac].modename); | ||
72 | |||
73 | seq_printf(seq, "\n"); | ||
74 | |||
75 | if (bond->params.mode == BOND_MODE_XOR || | ||
76 | bond->params.mode == BOND_MODE_8023AD) { | ||
77 | seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", | ||
78 | xmit_hashtype_tbl[bond->params.xmit_policy].modename, | ||
79 | bond->params.xmit_policy); | ||
80 | } | ||
81 | |||
82 | if (USES_PRIMARY(bond->params.mode)) { | ||
83 | seq_printf(seq, "Primary Slave: %s", | ||
84 | (bond->primary_slave) ? | ||
85 | bond->primary_slave->dev->name : "None"); | ||
86 | if (bond->primary_slave) | ||
87 | seq_printf(seq, " (primary_reselect %s)", | ||
88 | pri_reselect_tbl[bond->params.primary_reselect].modename); | ||
89 | |||
90 | seq_printf(seq, "\nCurrently Active Slave: %s\n", | ||
91 | (curr) ? curr->dev->name : "None"); | ||
92 | } | ||
93 | |||
94 | seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ? | ||
95 | "up" : "down"); | ||
96 | seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon); | ||
97 | seq_printf(seq, "Up Delay (ms): %d\n", | ||
98 | bond->params.updelay * bond->params.miimon); | ||
99 | seq_printf(seq, "Down Delay (ms): %d\n", | ||
100 | bond->params.downdelay * bond->params.miimon); | ||
101 | |||
102 | |||
103 | /* ARP information */ | ||
104 | if (bond->params.arp_interval > 0) { | ||
105 | int printed = 0; | ||
106 | seq_printf(seq, "ARP Polling Interval (ms): %d\n", | ||
107 | bond->params.arp_interval); | ||
108 | |||
109 | seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); | ||
110 | |||
111 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | ||
112 | if (!bond->params.arp_targets[i]) | ||
113 | break; | ||
114 | if (printed) | ||
115 | seq_printf(seq, ","); | ||
116 | seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); | ||
117 | printed = 1; | ||
118 | } | ||
119 | seq_printf(seq, "\n"); | ||
120 | } | ||
121 | |||
122 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
123 | struct ad_info ad_info; | ||
124 | |||
125 | seq_puts(seq, "\n802.3ad info\n"); | ||
126 | seq_printf(seq, "LACP rate: %s\n", | ||
127 | (bond->params.lacp_fast) ? "fast" : "slow"); | ||
128 | seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", | ||
129 | ad_select_tbl[bond->params.ad_select].modename); | ||
130 | |||
131 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) { | ||
132 | seq_printf(seq, "bond %s has no active aggregator\n", | ||
133 | bond->dev->name); | ||
134 | } else { | ||
135 | seq_printf(seq, "Active Aggregator Info:\n"); | ||
136 | |||
137 | seq_printf(seq, "\tAggregator ID: %d\n", | ||
138 | ad_info.aggregator_id); | ||
139 | seq_printf(seq, "\tNumber of ports: %d\n", | ||
140 | ad_info.ports); | ||
141 | seq_printf(seq, "\tActor Key: %d\n", | ||
142 | ad_info.actor_key); | ||
143 | seq_printf(seq, "\tPartner Key: %d\n", | ||
144 | ad_info.partner_key); | ||
145 | seq_printf(seq, "\tPartner Mac Address: %pM\n", | ||
146 | ad_info.partner_system); | ||
147 | } | ||
148 | } | ||
149 | } | ||
150 | |||
151 | static void bond_info_show_slave(struct seq_file *seq, | ||
152 | const struct slave *slave) | ||
153 | { | ||
154 | struct bonding *bond = seq->private; | ||
155 | |||
156 | seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); | ||
157 | seq_printf(seq, "MII Status: %s\n", | ||
158 | (slave->link == BOND_LINK_UP) ? "up" : "down"); | ||
159 | seq_printf(seq, "Speed: %d Mbps\n", slave->speed); | ||
160 | seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half"); | ||
161 | seq_printf(seq, "Link Failure Count: %u\n", | ||
162 | slave->link_failure_count); | ||
163 | |||
164 | seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); | ||
165 | |||
166 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
167 | const struct aggregator *agg | ||
168 | = SLAVE_AD_INFO(slave).port.aggregator; | ||
169 | |||
170 | if (agg) | ||
171 | seq_printf(seq, "Aggregator ID: %d\n", | ||
172 | agg->aggregator_identifier); | ||
173 | else | ||
174 | seq_puts(seq, "Aggregator ID: N/A\n"); | ||
175 | } | ||
176 | seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); | ||
177 | } | ||
178 | |||
179 | static int bond_info_seq_show(struct seq_file *seq, void *v) | ||
180 | { | ||
181 | if (v == SEQ_START_TOKEN) { | ||
182 | seq_printf(seq, "%s\n", bond_version); | ||
183 | bond_info_show_master(seq); | ||
184 | } else | ||
185 | bond_info_show_slave(seq, v); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static const struct seq_operations bond_info_seq_ops = { | ||
191 | .start = bond_info_seq_start, | ||
192 | .next = bond_info_seq_next, | ||
193 | .stop = bond_info_seq_stop, | ||
194 | .show = bond_info_seq_show, | ||
195 | }; | ||
196 | |||
197 | static int bond_info_open(struct inode *inode, struct file *file) | ||
198 | { | ||
199 | struct seq_file *seq; | ||
200 | struct proc_dir_entry *proc; | ||
201 | int res; | ||
202 | |||
203 | res = seq_open(file, &bond_info_seq_ops); | ||
204 | if (!res) { | ||
205 | /* recover the pointer buried in proc_dir_entry data */ | ||
206 | seq = file->private_data; | ||
207 | proc = PDE(inode); | ||
208 | seq->private = proc->data; | ||
209 | } | ||
210 | |||
211 | return res; | ||
212 | } | ||
213 | |||
214 | static const struct file_operations bond_info_fops = { | ||
215 | .owner = THIS_MODULE, | ||
216 | .open = bond_info_open, | ||
217 | .read = seq_read, | ||
218 | .llseek = seq_lseek, | ||
219 | .release = seq_release, | ||
220 | }; | ||
221 | |||
222 | void bond_create_proc_entry(struct bonding *bond) | ||
223 | { | ||
224 | struct net_device *bond_dev = bond->dev; | ||
225 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | ||
226 | |||
227 | if (bn->proc_dir) { | ||
228 | bond->proc_entry = proc_create_data(bond_dev->name, | ||
229 | S_IRUGO, bn->proc_dir, | ||
230 | &bond_info_fops, bond); | ||
231 | if (bond->proc_entry == NULL) | ||
232 | pr_warning("Warning: Cannot create /proc/net/%s/%s\n", | ||
233 | DRV_NAME, bond_dev->name); | ||
234 | else | ||
235 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | void bond_remove_proc_entry(struct bonding *bond) | ||
240 | { | ||
241 | struct net_device *bond_dev = bond->dev; | ||
242 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | ||
243 | |||
244 | if (bn->proc_dir && bond->proc_entry) { | ||
245 | remove_proc_entry(bond->proc_file_name, bn->proc_dir); | ||
246 | memset(bond->proc_file_name, 0, IFNAMSIZ); | ||
247 | bond->proc_entry = NULL; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | /* Create the bonding directory under /proc/net, if it doesn't exist yet. | ||
252 | * Caller must hold rtnl_lock. | ||
253 | */ | ||
254 | void __net_init bond_create_proc_dir(struct bond_net *bn) | ||
255 | { | ||
256 | if (!bn->proc_dir) { | ||
257 | bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); | ||
258 | if (!bn->proc_dir) | ||
259 | pr_warning("Warning: cannot create /proc/net/%s\n", | ||
260 | DRV_NAME); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | /* Destroy the bonding directory under /proc/net, if empty. | ||
265 | * Caller must hold rtnl_lock. | ||
266 | */ | ||
267 | void __net_exit bond_destroy_proc_dir(struct bond_net *bn) | ||
268 | { | ||
269 | if (bn->proc_dir) { | ||
270 | remove_proc_entry(DRV_NAME, bn->net->proc_net); | ||
271 | bn->proc_dir = NULL; | ||
272 | } | ||
273 | } | ||
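Everything above moved here essentially verbatim from bond_main.c; the visible differences are that the entry points lose their static qualifier and the banner becomes the shared bond_version macro. For this to link with and without procfs (the Makefile only builds bond_procfs.o when CONFIG_PROC_FS is set), bonding.h is expected to provide real declarations in one case and empty inline stubs in the other; that header hunk is not shown in this excerpt, so the following is an assumption about its shape:

        #ifdef CONFIG_PROC_FS
        void bond_create_proc_entry(struct bonding *bond);
        void bond_remove_proc_entry(struct bonding *bond);
        void bond_create_proc_dir(struct bond_net *bn);
        void bond_destroy_proc_dir(struct bond_net *bn);
        #else
        static inline void bond_create_proc_entry(struct bonding *bond) {}
        static inline void bond_remove_proc_entry(struct bonding *bond) {}
        static inline void bond_create_proc_dir(struct bond_net *bn) {}
        static inline void bond_destroy_proc_dir(struct bond_net *bn) {}
        #endif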
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index c311aed9bd02..88fcb25e554a 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -118,7 +118,10 @@ static ssize_t bonding_store_bonds(struct class *cls, | |||
118 | pr_info("%s is being created...\n", ifname); | 118 | pr_info("%s is being created...\n", ifname); |
119 | rv = bond_create(net, ifname); | 119 | rv = bond_create(net, ifname); |
120 | if (rv) { | 120 | if (rv) { |
121 | pr_info("Bond creation failed.\n"); | 121 | if (rv == -EEXIST) |
122 | pr_info("%s already exists.\n", ifname); | ||
123 | else | ||
124 | pr_info("%s creation failed.\n", ifname); | ||
122 | res = rv; | 125 | res = rv; |
123 | } | 126 | } |
124 | } else if (command[0] == '-') { | 127 | } else if (command[0] == '-') { |
@@ -224,12 +227,6 @@ static ssize_t bonding_store_slaves(struct device *d, | |||
224 | struct net_device *dev; | 227 | struct net_device *dev; |
225 | struct bonding *bond = to_bond(d); | 228 | struct bonding *bond = to_bond(d); |
226 | 229 | ||
227 | /* Quick sanity check -- is the bond interface up? */ | ||
228 | if (!(bond->dev->flags & IFF_UP)) { | ||
229 | pr_warning("%s: doing slave updates when interface is down.\n", | ||
230 | bond->dev->name); | ||
231 | } | ||
232 | |||
233 | if (!rtnl_trylock()) | 230 | if (!rtnl_trylock()) |
234 | return restart_syscall(); | 231 | return restart_syscall(); |
235 | 232 | ||
@@ -322,11 +319,6 @@ static ssize_t bonding_store_mode(struct device *d, | |||
322 | ret = -EINVAL; | 319 | ret = -EINVAL; |
323 | goto out; | 320 | goto out; |
324 | } | 321 | } |
325 | if (bond->params.mode == BOND_MODE_8023AD) | ||
326 | bond_unset_master_3ad_flags(bond); | ||
327 | |||
328 | if (bond->params.mode == BOND_MODE_ALB) | ||
329 | bond_unset_master_alb_flags(bond); | ||
330 | 322 | ||
331 | bond->params.mode = new_value; | 323 | bond->params.mode = new_value; |
332 | bond_set_mode_ops(bond, bond->params.mode); | 324 | bond_set_mode_ops(bond, bond->params.mode); |
@@ -424,11 +416,6 @@ static ssize_t bonding_store_arp_validate(struct device *d, | |||
424 | bond->dev->name, arp_validate_tbl[new_value].modename, | 416 | bond->dev->name, arp_validate_tbl[new_value].modename, |
425 | new_value); | 417 | new_value); |
426 | 418 | ||
427 | if (!bond->params.arp_validate && new_value) | ||
428 | bond_register_arp(bond); | ||
429 | else if (bond->params.arp_validate && !new_value) | ||
430 | bond_unregister_arp(bond); | ||
431 | |||
432 | bond->params.arp_validate = new_value; | 419 | bond->params.arp_validate = new_value; |
433 | 420 | ||
434 | return count; | 421 | return count; |
@@ -527,8 +514,6 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
527 | pr_info("%s: Setting ARP monitoring interval to %d.\n", | 514 | pr_info("%s: Setting ARP monitoring interval to %d.\n", |
528 | bond->dev->name, new_value); | 515 | bond->dev->name, new_value); |
529 | bond->params.arp_interval = new_value; | 516 | bond->params.arp_interval = new_value; |
530 | if (bond->params.arp_interval) | ||
531 | bond->dev->priv_flags |= IFF_MASTER_ARPMON; | ||
532 | if (bond->params.miimon) { | 517 | if (bond->params.miimon) { |
533 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", | 518 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", |
534 | bond->dev->name, bond->dev->name); | 519 | bond->dev->name, bond->dev->name); |
@@ -878,82 +863,28 @@ static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, | |||
878 | bonding_show_ad_select, bonding_store_ad_select); | 863 | bonding_show_ad_select, bonding_store_ad_select); |
879 | 864 | ||
880 | /* | 865 | /* |
881 | * Show and set the number of grat ARP to send after a failover event. | 866 | * Show and set the number of peer notifications to send after a failover event. |
882 | */ | 867 | */ |
883 | static ssize_t bonding_show_n_grat_arp(struct device *d, | 868 | static ssize_t bonding_show_num_peer_notif(struct device *d, |
884 | struct device_attribute *attr, | 869 | struct device_attribute *attr, |
885 | char *buf) | 870 | char *buf) |
886 | { | 871 | { |
887 | struct bonding *bond = to_bond(d); | 872 | struct bonding *bond = to_bond(d); |
888 | 873 | return sprintf(buf, "%d\n", bond->params.num_peer_notif); | |
889 | return sprintf(buf, "%d\n", bond->params.num_grat_arp); | ||
890 | } | 874 | } |
891 | 875 | ||
892 | static ssize_t bonding_store_n_grat_arp(struct device *d, | 876 | static ssize_t bonding_store_num_peer_notif(struct device *d, |
893 | struct device_attribute *attr, | 877 | struct device_attribute *attr, |
894 | const char *buf, size_t count) | 878 | const char *buf, size_t count) |
895 | { | 879 | { |
896 | int new_value, ret = count; | ||
897 | struct bonding *bond = to_bond(d); | 880 | struct bonding *bond = to_bond(d); |
898 | 881 | int err = kstrtou8(buf, 10, &bond->params.num_peer_notif); | |
899 | if (sscanf(buf, "%d", &new_value) != 1) { | 882 | return err ? err : count; |
900 | pr_err("%s: no num_grat_arp value specified.\n", | ||
901 | bond->dev->name); | ||
902 | ret = -EINVAL; | ||
903 | goto out; | ||
904 | } | ||
905 | if (new_value < 0 || new_value > 255) { | ||
906 | pr_err("%s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n", | ||
907 | bond->dev->name, new_value); | ||
908 | ret = -EINVAL; | ||
909 | goto out; | ||
910 | } else { | ||
911 | bond->params.num_grat_arp = new_value; | ||
912 | } | ||
913 | out: | ||
914 | return ret; | ||
915 | } | 883 | } |
916 | static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, | 884 | static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, |
917 | bonding_show_n_grat_arp, bonding_store_n_grat_arp); | 885 | bonding_show_num_peer_notif, bonding_store_num_peer_notif); |
918 | |||
919 | /* | ||
920 | * Show and set the number of unsolicited NA's to send after a failover event. | ||
921 | */ | ||
922 | static ssize_t bonding_show_n_unsol_na(struct device *d, | ||
923 | struct device_attribute *attr, | ||
924 | char *buf) | ||
925 | { | ||
926 | struct bonding *bond = to_bond(d); | ||
927 | |||
928 | return sprintf(buf, "%d\n", bond->params.num_unsol_na); | ||
929 | } | ||
930 | |||
931 | static ssize_t bonding_store_n_unsol_na(struct device *d, | ||
932 | struct device_attribute *attr, | ||
933 | const char *buf, size_t count) | ||
934 | { | ||
935 | int new_value, ret = count; | ||
936 | struct bonding *bond = to_bond(d); | ||
937 | |||
938 | if (sscanf(buf, "%d", &new_value) != 1) { | ||
939 | pr_err("%s: no num_unsol_na value specified.\n", | ||
940 | bond->dev->name); | ||
941 | ret = -EINVAL; | ||
942 | goto out; | ||
943 | } | ||
944 | |||
945 | if (new_value < 0 || new_value > 255) { | ||
946 | pr_err("%s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n", | ||
947 | bond->dev->name, new_value); | ||
948 | ret = -EINVAL; | ||
949 | goto out; | ||
950 | } else | ||
951 | bond->params.num_unsol_na = new_value; | ||
952 | out: | ||
953 | return ret; | ||
954 | } | ||
955 | static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, | 886 | static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, |
956 | bonding_show_n_unsol_na, bonding_store_n_unsol_na); | 887 | bonding_show_num_peer_notif, bonding_store_num_peer_notif); |
957 | 888 | ||
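Both attribute names now share a single show/store pair backed by params.num_peer_notif. The store side shrinks to one kstrtou8() call because kstrtou8() already rejects non-numeric input (-EINVAL) and anything outside 0-255 (-ERANGE), making the old sscanf()-plus-bounds-check redundant. A minimal sketch of the idiom, assuming nothing beyond the standard kstrtox helpers (the wrapper name is illustrative):

        static ssize_t store_u8_param(const char *buf, size_t count, u8 *dst)
        {
                int err = kstrtou8(buf, 10, dst);

                return err ? err : count;
        }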
958 | /* | 889 | /* |
959 | * Show and set the MII monitor interval. There are two tricky bits | 890 | * Show and set the MII monitor interval. There are two tricky bits |
@@ -1004,9 +935,7 @@ static ssize_t bonding_store_miimon(struct device *d, | |||
1004 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", | 935 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", |
1005 | bond->dev->name); | 936 | bond->dev->name); |
1006 | bond->params.arp_interval = 0; | 937 | bond->params.arp_interval = 0; |
1007 | bond->dev->priv_flags &= ~IFF_MASTER_ARPMON; | ||
1008 | if (bond->params.arp_validate) { | 938 | if (bond->params.arp_validate) { |
1009 | bond_unregister_arp(bond); | ||
1010 | bond->params.arp_validate = | 939 | bond->params.arp_validate = |
1011 | BOND_ARP_VALIDATE_NONE; | 940 | BOND_ARP_VALIDATE_NONE; |
1012 | } | 941 | } |
@@ -1066,6 +995,7 @@ static ssize_t bonding_store_primary(struct device *d, | |||
1066 | 995 | ||
1067 | if (!rtnl_trylock()) | 996 | if (!rtnl_trylock()) |
1068 | return restart_syscall(); | 997 | return restart_syscall(); |
998 | block_netpoll_tx(); | ||
1069 | read_lock(&bond->lock); | 999 | read_lock(&bond->lock); |
1070 | write_lock_bh(&bond->curr_slave_lock); | 1000 | write_lock_bh(&bond->curr_slave_lock); |
1071 | 1001 | ||
@@ -1101,6 +1031,7 @@ static ssize_t bonding_store_primary(struct device *d, | |||
1101 | out: | 1031 | out: |
1102 | write_unlock_bh(&bond->curr_slave_lock); | 1032 | write_unlock_bh(&bond->curr_slave_lock); |
1103 | read_unlock(&bond->lock); | 1033 | read_unlock(&bond->lock); |
1034 | unblock_netpoll_tx(); | ||
1104 | rtnl_unlock(); | 1035 | rtnl_unlock(); |
1105 | 1036 | ||
1106 | return count; | 1037 | return count; |
@@ -1146,11 +1077,13 @@ static ssize_t bonding_store_primary_reselect(struct device *d, | |||
1146 | bond->dev->name, pri_reselect_tbl[new_value].modename, | 1077 | bond->dev->name, pri_reselect_tbl[new_value].modename, |
1147 | new_value); | 1078 | new_value); |
1148 | 1079 | ||
1080 | block_netpoll_tx(); | ||
1149 | read_lock(&bond->lock); | 1081 | read_lock(&bond->lock); |
1150 | write_lock_bh(&bond->curr_slave_lock); | 1082 | write_lock_bh(&bond->curr_slave_lock); |
1151 | bond_select_active_slave(bond); | 1083 | bond_select_active_slave(bond); |
1152 | write_unlock_bh(&bond->curr_slave_lock); | 1084 | write_unlock_bh(&bond->curr_slave_lock); |
1153 | read_unlock(&bond->lock); | 1085 | read_unlock(&bond->lock); |
1086 | unblock_netpoll_tx(); | ||
1154 | out: | 1087 | out: |
1155 | rtnl_unlock(); | 1088 | rtnl_unlock(); |
1156 | return ret; | 1089 | return ret; |
@@ -1194,7 +1127,7 @@ static ssize_t bonding_store_carrier(struct device *d, | |||
1194 | bond->dev->name, new_value); | 1127 | bond->dev->name, new_value); |
1195 | } | 1128 | } |
1196 | out: | 1129 | out: |
1197 | return count; | 1130 | return ret; |
1198 | } | 1131 | } |
1199 | static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, | 1132 | static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, |
1200 | bonding_show_carrier, bonding_store_carrier); | 1133 | bonding_show_carrier, bonding_store_carrier); |
@@ -1232,6 +1165,8 @@ static ssize_t bonding_store_active_slave(struct device *d, | |||
1232 | 1165 | ||
1233 | if (!rtnl_trylock()) | 1166 | if (!rtnl_trylock()) |
1234 | return restart_syscall(); | 1167 | return restart_syscall(); |
1168 | |||
1169 | block_netpoll_tx(); | ||
1235 | read_lock(&bond->lock); | 1170 | read_lock(&bond->lock); |
1236 | write_lock_bh(&bond->curr_slave_lock); | 1171 | write_lock_bh(&bond->curr_slave_lock); |
1237 | 1172 | ||
@@ -1288,6 +1223,8 @@ static ssize_t bonding_store_active_slave(struct device *d, | |||
1288 | out: | 1223 | out: |
1289 | write_unlock_bh(&bond->curr_slave_lock); | 1224 | write_unlock_bh(&bond->curr_slave_lock); |
1290 | read_unlock(&bond->lock); | 1225 | read_unlock(&bond->lock); |
1226 | unblock_netpoll_tx(); | ||
1227 | |||
1291 | rtnl_unlock(); | 1228 | rtnl_unlock(); |
1292 | 1229 | ||
1293 | return count; | 1230 | return count; |
@@ -1579,19 +1516,62 @@ static ssize_t bonding_store_slaves_active(struct device *d, | |||
1579 | } | 1516 | } |
1580 | 1517 | ||
1581 | bond_for_each_slave(bond, slave, i) { | 1518 | bond_for_each_slave(bond, slave, i) { |
1582 | if (slave->state == BOND_STATE_BACKUP) { | 1519 | if (!bond_is_active_slave(slave)) { |
1583 | if (new_value) | 1520 | if (new_value) |
1584 | slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; | 1521 | slave->inactive = 0; |
1585 | else | 1522 | else |
1586 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | 1523 | slave->inactive = 1; |
1587 | } | 1524 | } |
1588 | } | 1525 | } |
1589 | out: | 1526 | out: |
1590 | return count; | 1527 | return ret; |
1591 | } | 1528 | } |
1592 | static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, | 1529 | static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, |
1593 | bonding_show_slaves_active, bonding_store_slaves_active); | 1530 | bonding_show_slaves_active, bonding_store_slaves_active); |
1594 | 1531 | ||
1532 | /* | ||
1533 | * Show and set the number of IGMP membership reports to send on link failure | ||
1534 | */ | ||
1535 | static ssize_t bonding_show_resend_igmp(struct device *d, | ||
1536 | struct device_attribute *attr, | ||
1537 | char *buf) | ||
1538 | { | ||
1539 | struct bonding *bond = to_bond(d); | ||
1540 | |||
1541 | return sprintf(buf, "%d\n", bond->params.resend_igmp); | ||
1542 | } | ||
1543 | |||
1544 | static ssize_t bonding_store_resend_igmp(struct device *d, | ||
1545 | struct device_attribute *attr, | ||
1546 | const char *buf, size_t count) | ||
1547 | { | ||
1548 | int new_value, ret = count; | ||
1549 | struct bonding *bond = to_bond(d); | ||
1550 | |||
1551 | if (sscanf(buf, "%d", &new_value) != 1) { | ||
1552 | pr_err("%s: no resend_igmp value specified.\n", | ||
1553 | bond->dev->name); | ||
1554 | ret = -EINVAL; | ||
1555 | goto out; | ||
1556 | } | ||
1557 | |||
1558 | if (new_value < 0 || new_value > 255) { | ||
1559 | pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n", | ||
1560 | bond->dev->name, new_value); | ||
1561 | ret = -EINVAL; | ||
1562 | goto out; | ||
1563 | } | ||
1564 | |||
1565 | pr_info("%s: Setting resend_igmp to %d.\n", | ||
1566 | bond->dev->name, new_value); | ||
1567 | bond->params.resend_igmp = new_value; | ||
1568 | out: | ||
1569 | return ret; | ||
1570 | } | ||
1571 | |||
1572 | static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, | ||
1573 | bonding_show_resend_igmp, bonding_store_resend_igmp); | ||
1574 | |||
1595 | static struct attribute *per_bond_attrs[] = { | 1575 | static struct attribute *per_bond_attrs[] = { |
1596 | &dev_attr_slaves.attr, | 1576 | &dev_attr_slaves.attr, |
1597 | &dev_attr_mode.attr, | 1577 | &dev_attr_mode.attr, |
@@ -1619,6 +1599,7 @@ static struct attribute *per_bond_attrs[] = { | |||
1619 | &dev_attr_ad_partner_mac.attr, | 1599 | &dev_attr_ad_partner_mac.attr, |
1620 | &dev_attr_queue_id.attr, | 1600 | &dev_attr_queue_id.attr, |
1621 | &dev_attr_all_slaves_active.attr, | 1601 | &dev_attr_all_slaves_active.attr, |
1602 | &dev_attr_resend_igmp.attr, | ||
1622 | NULL, | 1603 | NULL, |
1623 | }; | 1604 | }; |
1624 | 1605 | ||
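The new resend_igmp attribute accepts a count in the range 0-255 and lands in bond->params.resend_igmp; the bonding.h changes below add the matching mcast_work delayed work and an igmp_retrans counter. A hedged sketch of how these pieces plausibly fit together after a failover (the function name and the bond->wq workqueue reference are assumptions, not shown in this diff):

	static void example_resend_igmp_joins(struct bonding *bond)
	{
		/* Re-issue IGMP membership reports on the new active slave and
		 * reschedule until the per-bond retransmit budget is spent. */
		if (bond->igmp_retrans > 1) {
			bond->igmp_retrans--;
			queue_delayed_work(bond->wq, &bond->mcast_work, HZ / 5);
		}
		/* ...rejoin multicast groups for the bond and its VLANs here... */
	}

From userspace the knob sits alongside the other per-bond attributes, e.g. /sys/class/net/bond0/bonding/resend_igmp.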
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index c6fdd851579a..ea1d005be92d 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -18,16 +18,19 @@ | |||
18 | #include <linux/timer.h> | 18 | #include <linux/timer.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/if_bonding.h> | 20 | #include <linux/if_bonding.h> |
21 | #include <linux/kobject.h> | 21 | #include <linux/cpumask.h> |
22 | #include <linux/in6.h> | 22 | #include <linux/in6.h> |
23 | #include <linux/netpoll.h> | ||
23 | #include "bond_3ad.h" | 24 | #include "bond_3ad.h" |
24 | #include "bond_alb.h" | 25 | #include "bond_alb.h" |
25 | 26 | ||
26 | #define DRV_VERSION "3.7.0" | 27 | #define DRV_VERSION "3.7.1" |
27 | #define DRV_RELDATE "June 2, 2010" | 28 | #define DRV_RELDATE "April 27, 2011" |
28 | #define DRV_NAME "bonding" | 29 | #define DRV_NAME "bonding" |
29 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 30 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
30 | 31 | ||
32 | #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" | ||
33 | |||
31 | #define BOND_MAX_ARP_TARGETS 16 | 34 | #define BOND_MAX_ARP_TARGETS 16 |
32 | 35 | ||
33 | #define IS_UP(dev) \ | 36 | #define IS_UP(dev) \ |
@@ -36,23 +39,13 @@ | |||
36 | netif_carrier_ok(dev)) | 39 | netif_carrier_ok(dev)) |
37 | 40 | ||
38 | /* | 41 | /* |
39 | * Checks whether bond is ready for transmit. | ||
40 | * | ||
41 | * Caller must hold bond->lock | ||
42 | */ | ||
43 | #define BOND_IS_OK(bond) \ | ||
44 | (((bond)->dev->flags & IFF_UP) && \ | ||
45 | netif_running((bond)->dev) && \ | ||
46 | ((bond)->slave_cnt > 0)) | ||
47 | |||
48 | /* | ||
49 | * Checks whether slave is ready for transmit. | 42 | * Checks whether slave is ready for transmit. |
50 | */ | 43 | */ |
51 | #define SLAVE_IS_OK(slave) \ | 44 | #define SLAVE_IS_OK(slave) \ |
52 | (((slave)->dev->flags & IFF_UP) && \ | 45 | (((slave)->dev->flags & IFF_UP) && \ |
53 | netif_running((slave)->dev) && \ | 46 | netif_running((slave)->dev) && \ |
54 | ((slave)->link == BOND_LINK_UP) && \ | 47 | ((slave)->link == BOND_LINK_UP) && \ |
55 | ((slave)->state == BOND_STATE_ACTIVE)) | 48 | bond_is_active_slave(slave)) |
56 | 49 | ||
57 | 50 | ||
58 | #define USES_PRIMARY(mode) \ | 51 | #define USES_PRIMARY(mode) \ |
@@ -117,12 +110,36 @@ | |||
117 | bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave) | 110 | bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave) |
118 | 111 | ||
119 | 112 | ||
113 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
114 | extern atomic_t netpoll_block_tx; | ||
115 | |||
116 | static inline void block_netpoll_tx(void) | ||
117 | { | ||
118 | atomic_inc(&netpoll_block_tx); | ||
119 | } | ||
120 | |||
121 | static inline void unblock_netpoll_tx(void) | ||
122 | { | ||
123 | atomic_dec(&netpoll_block_tx); | ||
124 | } | ||
125 | |||
126 | static inline int is_netpoll_tx_blocked(struct net_device *dev) | ||
127 | { | ||
128 | if (unlikely(netpoll_tx_running(dev))) | ||
129 | return atomic_read(&netpoll_block_tx); | ||
130 | return 0; | ||
131 | } | ||
132 | #else | ||
133 | #define block_netpoll_tx() | ||
134 | #define unblock_netpoll_tx() | ||
135 | #define is_netpoll_tx_blocked(dev) (0) | ||
136 | #endif | ||
137 | |||
120 | struct bond_params { | 138 | struct bond_params { |
121 | int mode; | 139 | int mode; |
122 | int xmit_policy; | 140 | int xmit_policy; |
123 | int miimon; | 141 | int miimon; |
124 | int num_grat_arp; | 142 | u8 num_peer_notif; |
125 | int num_unsol_na; | ||
126 | int arp_interval; | 143 | int arp_interval; |
127 | int arp_validate; | 144 | int arp_validate; |
128 | int use_carrier; | 145 | int use_carrier; |
@@ -136,6 +153,7 @@ struct bond_params { | |||
136 | __be32 arp_targets[BOND_MAX_ARP_TARGETS]; | 153 | __be32 arp_targets[BOND_MAX_ARP_TARGETS]; |
137 | int tx_queues; | 154 | int tx_queues; |
138 | int all_slaves_active; | 155 | int all_slaves_active; |
156 | int resend_igmp; | ||
139 | }; | 157 | }; |
140 | 158 | ||
141 | struct bond_parm_tbl { | 159 | struct bond_parm_tbl { |
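num_grat_arp and num_unsol_na collapse into a single u8 num_peer_notif, mirrored by send_peer_notif replacing send_grat_arp/send_unsol_na in struct bonding further down. A hedged sketch of the kind of check this enables (hypothetical helper name):

	static bool example_should_notify_peers(struct bonding *bond)
	{
		/* One pending-notification counter now covers both gratuitous
		 * ARPs and unsolicited IPv6 NAs after a failover. */
		return bond->send_peer_notif > 0;
	}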
@@ -149,29 +167,32 @@ struct vlan_entry { | |||
149 | struct list_head vlan_list; | 167 | struct list_head vlan_list; |
150 | __be32 vlan_ip; | 168 | __be32 vlan_ip; |
151 | unsigned short vlan_id; | 169 | unsigned short vlan_id; |
152 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
153 | struct in6_addr vlan_ipv6; | ||
154 | #endif | ||
155 | }; | 170 | }; |
156 | 171 | ||
157 | struct slave { | 172 | struct slave { |
158 | struct net_device *dev; /* first - useful for panic debug */ | 173 | struct net_device *dev; /* first - useful for panic debug */ |
159 | struct slave *next; | 174 | struct slave *next; |
160 | struct slave *prev; | 175 | struct slave *prev; |
176 | struct bonding *bond; /* our master */ | ||
161 | int delay; | 177 | int delay; |
162 | unsigned long jiffies; | 178 | unsigned long jiffies; |
163 | unsigned long last_arp_rx; | 179 | unsigned long last_arp_rx; |
164 | s8 link; /* one of BOND_LINK_XXXX */ | 180 | s8 link; /* one of BOND_LINK_XXXX */ |
165 | s8 new_link; | 181 | s8 new_link; |
166 | s8 state; /* one of BOND_STATE_XXXX */ | 182 | u8 backup:1, /* indicates backup slave. Value corresponds with |
183 | BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ | ||
184 | inactive:1; /* indicates inactive slave */ | ||
185 | u8 duplex; | ||
167 | u32 original_mtu; | 186 | u32 original_mtu; |
168 | u32 link_failure_count; | 187 | u32 link_failure_count; |
169 | u8 perm_hwaddr[ETH_ALEN]; | 188 | u32 speed; |
170 | u16 speed; | ||
171 | u8 duplex; | ||
172 | u16 queue_id; | 189 | u16 queue_id; |
190 | u8 perm_hwaddr[ETH_ALEN]; | ||
173 | struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ | 191 | struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ |
174 | struct tlb_slave_info tlb_info; | 192 | struct tlb_slave_info tlb_info; |
193 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
194 | struct netpoll *np; | ||
195 | #endif | ||
175 | }; | 196 | }; |
176 | 197 | ||
177 | /* | 198 | /* |
@@ -196,12 +217,14 @@ struct bonding { | |||
196 | struct slave *primary_slave; | 217 | struct slave *primary_slave; |
197 | bool force_primary; | 218 | bool force_primary; |
198 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ | 219 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ |
220 | void (*recv_probe)(struct sk_buff *, struct bonding *, | ||
221 | struct slave *); | ||
199 | rwlock_t lock; | 222 | rwlock_t lock; |
200 | rwlock_t curr_slave_lock; | 223 | rwlock_t curr_slave_lock; |
201 | s8 kill_timers; | 224 | s8 kill_timers; |
202 | s8 send_grat_arp; | 225 | u8 send_peer_notif; |
203 | s8 send_unsol_na; | ||
204 | s8 setup_by_slave; | 226 | s8 setup_by_slave; |
227 | s8 igmp_retrans; | ||
205 | #ifdef CONFIG_PROC_FS | 228 | #ifdef CONFIG_PROC_FS |
206 | struct proc_dir_entry *proc_entry; | 229 | struct proc_dir_entry *proc_entry; |
207 | char proc_file_name[IFNAMSIZ]; | 230 | char proc_file_name[IFNAMSIZ]; |
@@ -223,28 +246,34 @@ struct bonding { | |||
223 | struct delayed_work arp_work; | 246 | struct delayed_work arp_work; |
224 | struct delayed_work alb_work; | 247 | struct delayed_work alb_work; |
225 | struct delayed_work ad_work; | 248 | struct delayed_work ad_work; |
226 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 249 | struct delayed_work mcast_work; |
227 | struct in6_addr master_ipv6; | 250 | #ifdef CONFIG_DEBUG_FS |
228 | #endif | 251 | /* debugging support via debugfs */ |
252 | struct dentry *debug_dir; | ||
253 | #endif /* CONFIG_DEBUG_FS */ | ||
229 | }; | 254 | }; |
230 | 255 | ||
256 | #define bond_slave_get_rcu(dev) \ | ||
257 | ((struct slave *) rcu_dereference(dev->rx_handler_data)) | ||
258 | |||
231 | /** | 259 | /** |
232 | * Returns NULL if the net_device does not belong to any of the bond's slaves | 260 | * Returns NULL if the net_device does not belong to any of the bond's slaves |
233 | * | 261 | * |
234 | * Caller must hold bond lock for read | 262 | * Caller must hold bond lock for read |
235 | */ | 263 | */ |
236 | static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) | 264 | static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, |
265 | struct net_device *slave_dev) | ||
237 | { | 266 | { |
238 | struct slave *slave = NULL; | 267 | struct slave *slave = NULL; |
239 | int i; | 268 | int i; |
240 | 269 | ||
241 | bond_for_each_slave(bond, slave, i) { | 270 | bond_for_each_slave(bond, slave, i) { |
242 | if (slave->dev == slave_dev) { | 271 | if (slave->dev == slave_dev) { |
243 | break; | 272 | return slave; |
244 | } | 273 | } |
245 | } | 274 | } |
246 | 275 | ||
247 | return slave; | 276 | return NULL; |
248 | } | 277 | } |
249 | 278 | ||
250 | static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) | 279 | static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) |
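bond_slave_get_rcu() recovers the slave from dev->rx_handler_data under RCU, and struct slave now carries a bond back-pointer. A minimal sketch of such a lookup (hypothetical function; the caller is assumed to run under rcu_read_lock(), as rx_handlers do):

	static struct bonding *example_bond_from_rx(struct sk_buff *skb)
	{
		struct slave *slave = bond_slave_get_rcu(skb->dev);

		/* slave->bond is the back-pointer added to struct slave above */
		return slave ? slave->bond : NULL;
	}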
@@ -253,7 +282,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) | |||
253 | return NULL; | 282 | return NULL; |
254 | } | 283 | } |
255 | 284 | ||
256 | return (struct bonding *)netdev_priv(slave->dev->master); | 285 | return netdev_priv(slave->dev->master); |
257 | } | 286 | } |
258 | 287 | ||
259 | static inline bool bond_is_lb(const struct bonding *bond) | 288 | static inline bool bond_is_lb(const struct bonding *bond) |
@@ -262,6 +291,26 @@ static inline bool bond_is_lb(const struct bonding *bond) | |||
262 | bond->params.mode == BOND_MODE_ALB); | 291 | bond->params.mode == BOND_MODE_ALB); |
263 | } | 292 | } |
264 | 293 | ||
294 | static inline void bond_set_active_slave(struct slave *slave) | ||
295 | { | ||
296 | slave->backup = 0; | ||
297 | } | ||
298 | |||
299 | static inline void bond_set_backup_slave(struct slave *slave) | ||
300 | { | ||
301 | slave->backup = 1; | ||
302 | } | ||
303 | |||
304 | static inline int bond_slave_state(struct slave *slave) | ||
305 | { | ||
306 | return slave->backup; | ||
307 | } | ||
308 | |||
309 | static inline bool bond_is_active_slave(struct slave *slave) | ||
310 | { | ||
311 | return !bond_slave_state(slave); | ||
312 | } | ||
313 | |||
265 | #define BOND_PRI_RESELECT_ALWAYS 0 | 314 | #define BOND_PRI_RESELECT_ALWAYS 0 |
266 | #define BOND_PRI_RESELECT_BETTER 1 | 315 | #define BOND_PRI_RESELECT_BETTER 1 |
267 | #define BOND_PRI_RESELECT_FAILURE 2 | 316 | #define BOND_PRI_RESELECT_FAILURE 2 |
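With slave->state replaced by the backup:1 bitfield, comparisons against BOND_STATE_ACTIVE/BOND_STATE_BACKUP are expected to go through the new accessors instead. A small sketch of the substitution (hypothetical helper name), echoing the all_slaves_active handling earlier in this diff:

	static void example_mark_backups(struct bonding *bond)
	{
		struct slave *slave;
		int i;

		bond_for_each_slave(bond, slave, i) {
			if (bond_is_active_slave(slave))	/* was: slave->state == BOND_STATE_ACTIVE */
				continue;
			/* only backup slaves may also be flagged inactive */
			slave->inactive = !bond->params.all_slaves_active;
		}
	}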
@@ -279,7 +328,7 @@ static inline bool bond_is_lb(const struct bonding *bond) | |||
279 | static inline int slave_do_arp_validate(struct bonding *bond, | 328 | static inline int slave_do_arp_validate(struct bonding *bond, |
280 | struct slave *slave) | 329 | struct slave *slave) |
281 | { | 330 | { |
282 | return bond->params.arp_validate & (1 << slave->state); | 331 | return bond->params.arp_validate & (1 << bond_slave_state(slave)); |
283 | } | 332 | } |
284 | 333 | ||
285 | static inline unsigned long slave_last_rx(struct bonding *bond, | 334 | static inline unsigned long slave_last_rx(struct bonding *bond, |
@@ -291,47 +340,45 @@ static inline unsigned long slave_last_rx(struct bonding *bond, | |||
291 | return slave->dev->last_rx; | 340 | return slave->dev->last_rx; |
292 | } | 341 | } |
293 | 342 | ||
294 | static inline void bond_set_slave_inactive_flags(struct slave *slave) | 343 | #ifdef CONFIG_NET_POLL_CONTROLLER |
344 | static inline void bond_netpoll_send_skb(const struct slave *slave, | ||
345 | struct sk_buff *skb) | ||
295 | { | 346 | { |
296 | struct bonding *bond = netdev_priv(slave->dev->master); | 347 | struct netpoll *np = slave->np; |
297 | if (!bond_is_lb(bond)) | ||
298 | slave->state = BOND_STATE_BACKUP; | ||
299 | if (!bond->params.all_slaves_active) | ||
300 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | ||
301 | if (slave_do_arp_validate(bond, slave)) | ||
302 | slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; | ||
303 | } | ||
304 | 348 | ||
305 | static inline void bond_set_slave_active_flags(struct slave *slave) | 349 | if (np) |
306 | { | 350 | netpoll_send_skb(np, skb); |
307 | slave->state = BOND_STATE_ACTIVE; | ||
308 | slave->dev->priv_flags &= ~(IFF_SLAVE_INACTIVE | IFF_SLAVE_NEEDARP); | ||
309 | } | 351 | } |
310 | 352 | #else | |
311 | static inline void bond_set_master_3ad_flags(struct bonding *bond) | 353 | static inline void bond_netpoll_send_skb(const struct slave *slave, |
354 | struct sk_buff *skb) | ||
312 | { | 355 | { |
313 | bond->dev->priv_flags |= IFF_MASTER_8023AD; | ||
314 | } | 356 | } |
357 | #endif | ||
315 | 358 | ||
316 | static inline void bond_unset_master_3ad_flags(struct bonding *bond) | 359 | static inline void bond_set_slave_inactive_flags(struct slave *slave) |
317 | { | 360 | { |
318 | bond->dev->priv_flags &= ~IFF_MASTER_8023AD; | 361 | struct bonding *bond = netdev_priv(slave->dev->master); |
362 | if (!bond_is_lb(bond)) | ||
363 | bond_set_backup_slave(slave); | ||
364 | if (!bond->params.all_slaves_active) | ||
365 | slave->inactive = 1; | ||
319 | } | 366 | } |
320 | 367 | ||
321 | static inline void bond_set_master_alb_flags(struct bonding *bond) | 368 | static inline void bond_set_slave_active_flags(struct slave *slave) |
322 | { | 369 | { |
323 | bond->dev->priv_flags |= IFF_MASTER_ALB; | 370 | bond_set_active_slave(slave); |
371 | slave->inactive = 0; | ||
324 | } | 372 | } |
325 | 373 | ||
326 | static inline void bond_unset_master_alb_flags(struct bonding *bond) | 374 | static inline bool bond_is_slave_inactive(struct slave *slave) |
327 | { | 375 | { |
328 | bond->dev->priv_flags &= ~IFF_MASTER_ALB; | 376 | return slave->inactive; |
329 | } | 377 | } |
330 | 378 | ||
331 | struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); | 379 | struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); |
332 | int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); | 380 | int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); |
333 | int bond_create(struct net *net, const char *name); | 381 | int bond_create(struct net *net, const char *name); |
334 | int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); | ||
335 | int bond_create_sysfs(void); | 382 | int bond_create_sysfs(void); |
336 | void bond_destroy_sysfs(void); | 383 | void bond_destroy_sysfs(void); |
337 | void bond_prepare_sysfs_group(struct bonding *bond); | 384 | void bond_prepare_sysfs_group(struct bonding *bond); |
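bond_netpoll_send_skb() (with a no-op stub when CONFIG_NET_POLL_CONTROLLER is off) gives the transmit path a direct way to hand frames to a slave's netpoll instance. A hedged sketch of a caller (name assumed; the driver's real xmit wrapper carries more bookkeeping):

	static netdev_tx_t example_xmit_on_slave(struct slave *slave, struct sk_buff *skb)
	{
		skb->dev = slave->dev;

		/* When netpoll is actively transmitting on this slave, bypass
		 * the normal queueing discipline; otherwise queue as usual. */
		if (unlikely(netpoll_tx_running(slave->dev)))
			bond_netpoll_send_skb(slave, skb);
		else
			dev_queue_xmit(skb);

		return NETDEV_TX_OK;
	}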
@@ -346,8 +393,12 @@ void bond_set_mode_ops(struct bonding *bond, int mode); | |||
346 | int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl); | 393 | int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl); |
347 | void bond_select_active_slave(struct bonding *bond); | 394 | void bond_select_active_slave(struct bonding *bond); |
348 | void bond_change_active_slave(struct bonding *bond, struct slave *new_active); | 395 | void bond_change_active_slave(struct bonding *bond, struct slave *new_active); |
349 | void bond_register_arp(struct bonding *); | 396 | void bond_create_debugfs(void); |
350 | void bond_unregister_arp(struct bonding *); | 397 | void bond_destroy_debugfs(void); |
398 | void bond_debug_register(struct bonding *bond); | ||
399 | void bond_debug_unregister(struct bonding *bond); | ||
400 | void bond_debug_reregister(struct bonding *bond); | ||
401 | const char *bond_mode_name(int mode); | ||
351 | 402 | ||
352 | struct bond_net { | 403 | struct bond_net { |
353 | struct net * net; /* Associated network namespace */ | 404 | struct net * net; /* Associated network namespace */ |
@@ -357,6 +408,30 @@ struct bond_net { | |||
357 | #endif | 408 | #endif |
358 | }; | 409 | }; |
359 | 410 | ||
411 | #ifdef CONFIG_PROC_FS | ||
412 | void bond_create_proc_entry(struct bonding *bond); | ||
413 | void bond_remove_proc_entry(struct bonding *bond); | ||
414 | void bond_create_proc_dir(struct bond_net *bn); | ||
415 | void bond_destroy_proc_dir(struct bond_net *bn); | ||
416 | #else | ||
417 | static inline void bond_create_proc_entry(struct bonding *bond) | ||
418 | { | ||
419 | } | ||
420 | |||
421 | static inline void bond_remove_proc_entry(struct bonding *bond) | ||
422 | { | ||
423 | } | ||
424 | |||
425 | static inline void bond_create_proc_dir(struct bond_net *bn) | ||
426 | { | ||
427 | } | ||
428 | |||
429 | static inline void bond_destroy_proc_dir(struct bond_net *bn) | ||
430 | { | ||
431 | } | ||
432 | #endif | ||
433 | |||
434 | |||
360 | /* exported from bond_main.c */ | 435 | /* exported from bond_main.c */ |
361 | extern int bond_net_id; | 436 | extern int bond_net_id; |
362 | extern const struct bond_parm_tbl bond_lacp_tbl[]; | 437 | extern const struct bond_parm_tbl bond_lacp_tbl[]; |
@@ -367,23 +442,4 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[]; | |||
367 | extern const struct bond_parm_tbl pri_reselect_tbl[]; | 442 | extern const struct bond_parm_tbl pri_reselect_tbl[]; |
368 | extern struct bond_parm_tbl ad_select_tbl[]; | 443 | extern struct bond_parm_tbl ad_select_tbl[]; |
369 | 444 | ||
370 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
371 | void bond_send_unsolicited_na(struct bonding *bond); | ||
372 | void bond_register_ipv6_notifier(void); | ||
373 | void bond_unregister_ipv6_notifier(void); | ||
374 | #else | ||
375 | static inline void bond_send_unsolicited_na(struct bonding *bond) | ||
376 | { | ||
377 | return; | ||
378 | } | ||
379 | static inline void bond_register_ipv6_notifier(void) | ||
380 | { | ||
381 | return; | ||
382 | } | ||
383 | static inline void bond_unregister_ipv6_notifier(void) | ||
384 | { | ||
385 | return; | ||
386 | } | ||
387 | #endif | ||
388 | |||
389 | #endif /* _LINUX_BONDING_H */ | 445 | #endif /* _LINUX_BONDING_H */ |