author:    David S. Miller <davem@davemloft.net>  2014-09-13 16:29:57 -0400
committer: David S. Miller <davem@davemloft.net>  2014-09-13 16:29:57 -0400
commit:    8801d48cbde6ddd275c1e6b866e434a72cafeabd
tree:      40b4a29e8721c329dbc2d1b2332f4beecbef45e5
parent:    b25bd2515ea32cf5ddd5fd5a2a93b8c9dd875e4f
parent:    8c0bc550288d81e9ad8a2ed9136a72140b9ef507
Merge branch 'bonding-next'
Nikolay Aleksandrov says:
====================
bonding: get rid of curr_slave_lock
This is the second patch-set dealing with bond locking; the purpose here
is to convert curr_slave_lock into a spinlock called "mode_lock", which
can be used in the various modes for their specific needs. The first three
patches clean up the use of curr_slave_lock and prepare it for the
conversion, which is done in patch 4; the modes that were using their own
locks are then converted to use the new "mode_lock", giving us the
opportunity to remove their locks.
This patch-set has been tested in each mode by running enslave/release of
slaves in parallel with traffic transmission and miimon=1, i.e. running
all the time. In fact this led to the discovery of a subtle bug related to
RCU, which will be fixed in -net.
Also did an allmodconfig test just in case :-)
v2: fix bond_3ad_state_machine_handler's use of mode_lock and
curr_slave_lock
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
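The conversion pattern is the same in every mode: code that used to take the
curr_slave_lock rwlock (or a mode-private lock such as the 3ad
state_machine_lock or the ALB tx/rx hash-table locks) now serializes on a
single per-bond spinlock, bond->mode_lock, taken with the _bh variant wherever
the data is also touched from softirq context (e.g. on LACPDU reception).
Below is a minimal sketch of the shape of the change, using a simplified
stand-in struct rather than the real struct bonding from
drivers/net/bonding/bonding.h:

	#include <linux/spinlock.h>

	/* Illustrative fragment only: the real struct bonding carries many
	 * more fields; mode_lock is the spinlock this series introduces.
	 */
	struct bond_frag {
		spinlock_t mode_lock;	/* protects mode-specific data (3ad, ALB, TLB) */
	};

	static void mode_specific_work(struct bond_frag *bond)
	{
		/* old code: read_lock(&bond->curr_slave_lock) or a private
		 * per-mode spinlock; new code: one bond-wide spinlock.
		 */
		spin_lock_bh(&bond->mode_lock);
		/* touch mode-private state (hash tables, 802.3ad port state, ...) */
		spin_unlock_bh(&bond->mode_lock);
	}

Plain spin_lock()/spin_unlock() is enough in paths that already run with
softirqs disabled (the xmit paths), which is why the diff below mixes the two
variants.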
 drivers/net/bonding/bond_3ad.c     |  84
 drivers/net/bonding/bond_3ad.h     |   1
 drivers/net/bonding/bond_alb.c     | 159
 drivers/net/bonding/bond_alb.h     |   2
 drivers/net/bonding/bond_debugfs.c |   4
 drivers/net/bonding/bond_main.c    |  89
 drivers/net/bonding/bond_options.c |  10
 drivers/net/bonding/bonding.h      |  16
 8 files changed, 91 insertions(+), 274 deletions(-)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 5d27a6207384..2bb360f32a64 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -234,24 +234,6 @@ static inline int __check_agg_selection_timer(struct port *port)
 }

 /**
- * __get_state_machine_lock - lock the port's state machines
- * @port: the port we're looking at
- */
-static inline void __get_state_machine_lock(struct port *port)
-{
-	spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
-}
-
-/**
- * __release_state_machine_lock - unlock the port's state machines
- * @port: the port we're looking at
- */
-static inline void __release_state_machine_lock(struct port *port)
-{
-	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
-}
-
-/**
  * __get_link_speed - get a port's speed
  * @port: the port we're looking at
  *
@@ -341,16 +323,6 @@ static u8 __get_duplex(struct port *port)
 	return retval;
 }

-/**
- * __initialize_port_locks - initialize a port's STATE machine spinlock
- * @port: the slave of the port we're looking at
- */
-static inline void __initialize_port_locks(struct slave *slave)
-{
-	/* make sure it isn't called twice */
-	spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
-}
-
 /* Conversions */

 /**
@@ -1843,7 +1815,6 @@ void bond_3ad_bind_slave(struct slave *slave)

 		ad_initialize_port(port, bond->params.lacp_fast);

-		__initialize_port_locks(slave);
 		port->slave = slave;
 		port->actor_port_number = SLAVE_AD_INFO(slave)->id;
 		/* key is determined according to the link speed, duplex and user key(which
@@ -1899,6 +1870,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	struct slave *slave_iter;
 	struct list_head *iter;

+	/* Sync against bond_3ad_state_machine_handler() */
+	spin_lock_bh(&bond->mode_lock);
 	aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 	port = &(SLAVE_AD_INFO(slave)->port);

@@ -1906,7 +1879,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	if (!port->slave) {
 		netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n",
 			    slave->dev->name);
-		return;
+		goto out;
 	}

 	netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n",
@@ -2032,6 +2005,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
 		}
 	}
 	port->slave = NULL;
+
+out:
+	spin_unlock_bh(&bond->mode_lock);
 }

 /**
@@ -2057,7 +2033,11 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	struct port *port;
 	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;

-	read_lock(&bond->curr_slave_lock);
+	/* Lock to protect data accessed by all (e.g., port->sm_vars) and
+	 * against running with bond_3ad_unbind_slave. ad_rx_machine may run
+	 * concurrently due to incoming LACPDU as well.
+	 */
+	spin_lock_bh(&bond->mode_lock);
 	rcu_read_lock();

 	/* check if there are any slaves */
@@ -2093,12 +2073,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 			goto re_arm;
 		}

-		/* Lock around state machines to protect data accessed
-		 * by all (e.g., port->sm_vars). ad_rx_machine may run
-		 * concurrently due to incoming LACPDU.
-		 */
-		__get_state_machine_lock(port);
-
 		ad_rx_machine(NULL, port);
 		ad_periodic_machine(port);
 		ad_port_selection_logic(port);
@@ -2108,8 +2082,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 		/* turn off the BEGIN bit, since we already handled it */
 		if (port->sm_vars & AD_PORT_BEGIN)
 			port->sm_vars &= ~AD_PORT_BEGIN;
-
-		__release_state_machine_lock(port);
 	}

 re_arm:
@@ -2120,7 +2092,7 @@ re_arm:
 		}
 	}
 	rcu_read_unlock();
-	read_unlock(&bond->curr_slave_lock);
+	spin_unlock_bh(&bond->mode_lock);

 	if (should_notify_rtnl && rtnl_trylock()) {
 		bond_slave_state_notify(bond);
@@ -2161,9 +2133,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 			netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
 				   port->actor_port_number);
 			/* Protect against concurrent state machines */
-			__get_state_machine_lock(port);
+			spin_lock(&slave->bond->mode_lock);
 			ad_rx_machine(lacpdu, port);
-			__release_state_machine_lock(port);
+			spin_unlock(&slave->bond->mode_lock);
 			break;

 		case AD_TYPE_MARKER:
@@ -2213,7 +2185,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 		return;
 	}

-	__get_state_machine_lock(port);
+	spin_lock_bh(&slave->bond->mode_lock);

 	port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
 	port->actor_oper_port_key = port->actor_admin_port_key |=
@@ -2224,7 +2196,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 	 */
 	port->sm_vars |= AD_PORT_BEGIN;

-	__release_state_machine_lock(port);
+	spin_unlock_bh(&slave->bond->mode_lock);
 }

 /**
@@ -2246,7 +2218,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 		return;
 	}

-	__get_state_machine_lock(port);
+	spin_lock_bh(&slave->bond->mode_lock);

 	port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
 	port->actor_oper_port_key = port->actor_admin_port_key |=
@@ -2257,7 +2229,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 	 */
 	port->sm_vars |= AD_PORT_BEGIN;

-	__release_state_machine_lock(port);
+	spin_unlock_bh(&slave->bond->mode_lock);
 }

 /**
@@ -2280,7 +2252,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 		return;
 	}

-	__get_state_machine_lock(port);
+	spin_lock_bh(&slave->bond->mode_lock);
 	/* on link down we are zeroing duplex and speed since
 	 * some of the adaptors(ce1000.lan) report full duplex/speed
 	 * instead of N/A(duplex) / 0(speed).
@@ -2311,7 +2283,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 	 */
 	port->sm_vars |= AD_PORT_BEGIN;

-	__release_state_machine_lock(port);
+	spin_unlock_bh(&slave->bond->mode_lock);
 }

 /**
@@ -2476,20 +2448,16 @@ err_free:
 int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 			 struct slave *slave)
 {
-	int ret = RX_HANDLER_ANOTHER;
 	struct lacpdu *lacpdu, _lacpdu;

 	if (skb->protocol != PKT_TYPE_LACPDU)
-		return ret;
+		return RX_HANDLER_ANOTHER;

 	lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
 	if (!lacpdu)
-		return ret;
+		return RX_HANDLER_ANOTHER;

-	read_lock(&bond->curr_slave_lock);
-	ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
-	read_unlock(&bond->curr_slave_lock);
-	return ret;
+	return bond_3ad_rx_indication(lacpdu, slave, skb->len);
 }

 /**
@@ -2499,7 +2467,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
  * When modify lacp_rate parameter via sysfs,
  * update actor_oper_port_state of each port.
  *
- * Hold slave->state_machine_lock,
+ * Hold bond->mode_lock,
  * so we can modify port->actor_oper_port_state,
  * no matter bond is up or down.
  */
@@ -2511,13 +2479,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 	int lacp_fast;

 	lacp_fast = bond->params.lacp_fast;
+	spin_lock_bh(&bond->mode_lock);
 	bond_for_each_slave(bond, slave, iter) {
 		port = &(SLAVE_AD_INFO(slave)->port);
-		__get_state_machine_lock(port);
 		if (lacp_fast)
 			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
 		else
 			port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
-		__release_state_machine_lock(port);
 	}
+	spin_unlock_bh(&bond->mode_lock);
 }
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index bb03b1df2f3e..c5f14ac63f3e 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -259,7 +259,6 @@ struct ad_bond_info {
 struct ad_slave_info {
 	struct aggregator aggregator;	/* 802.3ad aggregator structure */
 	struct port port;		/* 802.3ad port structure */
-	spinlock_t state_machine_lock;	/* mutex state machines vs. incoming LACPDU */
 	u16 id;
 };

diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 028496205f39..85af961f1317 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -100,27 +100,6 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)

 /*********************** tlb specific functions ***************************/

-static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
-{
-	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
-}
-
-static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
-{
-	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
-}
-
-static inline void _lock_tx_hashtbl(struct bonding *bond)
-{
-	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
-}
-
-static inline void _unlock_tx_hashtbl(struct bonding *bond)
-{
-	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
-}
-
-/* Caller must hold tx_hashtbl lock */
 static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
 {
 	if (save_load) {
@@ -140,7 +119,6 @@ static inline void tlb_init_slave(struct slave *slave)
 	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
 }

-/* Caller must hold bond lock for read, BH disabled */
 static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
 			      int save_load)
 {
@@ -163,13 +141,12 @@ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
 	tlb_init_slave(slave);
 }

-/* Caller must hold bond lock for read */
 static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
 			    int save_load)
 {
-	_lock_tx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);
 	__tlb_clear_slave(bond, slave, save_load);
-	_unlock_tx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 /* Must be called before starting the monitor timer */
@@ -184,14 +161,14 @@ static int tlb_initialize(struct bonding *bond)
 	if (!new_hashtbl)
 		return -1;

-	_lock_tx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	bond_info->tx_hashtbl = new_hashtbl;

 	for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
 		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);

-	_unlock_tx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);

 	return 0;
 }
@@ -202,12 +179,12 @@ static void tlb_deinitialize(struct bonding *bond)
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct tlb_up_slave *arr;

-	_lock_tx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	kfree(bond_info->tx_hashtbl);
 	bond_info->tx_hashtbl = NULL;

-	_unlock_tx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);

 	arr = rtnl_dereference(bond_info->slave_arr);
 	if (arr)
@@ -220,7 +197,6 @@ static long long compute_gap(struct slave *slave)
 	       (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
 }

-/* Caller must hold bond lock for read */
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
 	struct slave *slave, *least_loaded;
@@ -281,7 +257,6 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
 	return assigned_slave;
 }

-/* Caller must hold bond lock for read */
 static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
 					u32 skb_len)
 {
@@ -291,32 +266,13 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
 	 * tlb_choose_channel() is only called by bond_alb_xmit()
 	 * which already has softirq disabled.
 	 */
-	_lock_tx_hashtbl(bond);
+	spin_lock(&bond->mode_lock);
 	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
-	_unlock_tx_hashtbl(bond);
+	spin_unlock(&bond->mode_lock);
 	return tx_slave;
 }

 /*********************** rlb specific functions ***************************/
-static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
-{
-	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
-}
-
-static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
-{
-	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
-}
-
-static inline void _lock_rx_hashtbl(struct bonding *bond)
-{
-	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
-}
-
-static inline void _unlock_rx_hashtbl(struct bonding *bond)
-{
-	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
-}

 /* when an ARP REPLY is received from a client update its info
  * in the rx_hashtbl
@@ -327,7 +283,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 	struct rlb_client_info *client_info;
 	u32 hash_index;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
 	client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -342,7 +298,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 		bond_info->rx_ntt = 1;
 	}

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
@@ -378,7 +334,6 @@ out:
 	return RX_HANDLER_ANOTHER;
 }

-/* Caller must hold bond lock for read */
 static struct slave *rlb_next_rx_slave(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -411,7 +366,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
 	return rx_slave;
 }

-/* Caller must hold rcu_read_lock() for read */
+/* Caller must hold rcu_read_lock() */
 static struct slave *__rlb_next_rx_slave(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -447,11 +402,11 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
 /* teach the switch the mac of a disabled slave
  * on the primary for fault tolerance
  *
- * Caller must hold bond->curr_slave_lock for write or bond lock for write
+ * Caller must hold RTNL
  */
 static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 {
-	struct slave *curr_active = bond_deref_active_protected(bond);
+	struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

 	if (!curr_active)
 		return;
@@ -479,7 +434,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 	u32 index, next_index;

 	/* clear slave from rx_hashtbl */
-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	rx_hash_table = bond_info->rx_hashtbl;
 	index = bond_info->rx_hashtbl_used_head;
@@ -510,14 +465,10 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 		}
 	}

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);

-	write_lock_bh(&bond->curr_slave_lock);
-
-	if (slave != bond_deref_active_protected(bond))
+	if (slave != rtnl_dereference(bond->curr_active_slave))
 		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
-
-	write_unlock_bh(&bond->curr_slave_lock);
 }

 static void rlb_update_client(struct rlb_client_info *client_info)
@@ -565,7 +516,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	struct rlb_client_info *client_info;
 	u32 hash_index;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	hash_index = bond_info->rx_hashtbl_used_head;
 	for (; hash_index != RLB_NULL_INDEX;
@@ -583,7 +534,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	 */
 	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 /* The slave was assigned a new mac address - update the clients */
@@ -594,7 +545,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
 	int ntt = 0;
 	u32 hash_index;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	hash_index = bond_info->rx_hashtbl_used_head;
 	for (; hash_index != RLB_NULL_INDEX;
@@ -615,7 +566,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
 		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
 	}

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 /* mark all clients using src_ip to be updated */
@@ -625,7 +576,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
 	struct rlb_client_info *client_info;
 	u32 hash_index;

-	_lock_rx_hashtbl(bond);
+	spin_lock(&bond->mode_lock);

 	hash_index = bond_info->rx_hashtbl_used_head;
 	for (; hash_index != RLB_NULL_INDEX;
@@ -649,10 +600,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
 		}
 	}

-	_unlock_rx_hashtbl(bond);
+	spin_unlock(&bond->mode_lock);
 }

-/* Caller must hold both bond and ptr locks for read */
 static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -661,7 +611,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 	struct rlb_client_info *client_info;
 	u32 hash_index = 0;

-	_lock_rx_hashtbl(bond);
+	spin_lock(&bond->mode_lock);

 	curr_active_slave = rcu_dereference(bond->curr_active_slave);

@@ -680,7 +630,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon

 		assigned_slave = client_info->slave;
 		if (assigned_slave) {
-			_unlock_rx_hashtbl(bond);
+			spin_unlock(&bond->mode_lock);
 			return assigned_slave;
 		}
 	} else {
@@ -742,7 +692,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 		}
 	}

-	_unlock_rx_hashtbl(bond);
+	spin_unlock(&bond->mode_lock);

 	return assigned_slave;
 }
@@ -795,7 +745,6 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 	return tx_slave;
 }

-/* Caller must hold bond lock for read */
 static void rlb_rebalance(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -804,7 +753,7 @@ static void rlb_rebalance(struct bonding *bond)
 	int ntt;
 	u32 hash_index;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	ntt = 0;
 	hash_index = bond_info->rx_hashtbl_used_head;
@@ -822,7 +771,7 @@ static void rlb_rebalance(struct bonding *bond)
 	/* update the team's flag only after the whole iteration */
 	if (ntt)
 		bond_info->rx_ntt = 1;
-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 /* Caller must hold rx_hashtbl lock */
@@ -921,7 +870,7 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
 	u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
 	u32 index;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	index = bond_info->rx_hashtbl[ip_src_hash].src_first;
 	while (index != RLB_NULL_INDEX) {
@@ -932,7 +881,7 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
 		rlb_delete_table_entry(bond, index);
 		index = next_index;
 	}
-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 static int rlb_initialize(struct bonding *bond)
@@ -946,7 +895,7 @@ static int rlb_initialize(struct bonding *bond)
 	if (!new_hashtbl)
 		return -1;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	bond_info->rx_hashtbl = new_hashtbl;

@@ -955,7 +904,7 @@ static int rlb_initialize(struct bonding *bond)
 	for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
 		rlb_init_table_entry(bond_info->rx_hashtbl + i);

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);

 	/* register to receive ARPs */
 	bond->recv_probe = rlb_arp_recv;
@@ -967,13 +916,13 @@ static void rlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	kfree(bond_info->rx_hashtbl);
 	bond_info->rx_hashtbl = NULL;
 	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -981,7 +930,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	u32 curr_index;

-	_lock_rx_hashtbl_bh(bond);
+	spin_lock_bh(&bond->mode_lock);

 	curr_index = bond_info->rx_hashtbl_used_head;
 	while (curr_index != RLB_NULL_INDEX) {
@@ -994,7 +943,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 		curr_index = next_index;
 	}

-	_unlock_rx_hashtbl_bh(bond);
+	spin_unlock_bh(&bond->mode_lock);
 }

 /*********************** tlb/rlb shared functions *********************/
@@ -1398,9 +1347,9 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
 	}

 	if (tx_slave && bond->params.tlb_dynamic_lb) {
-		_lock_tx_hashtbl(bond);
+		spin_lock(&bond->mode_lock);
 		__tlb_clear_slave(bond, tx_slave, 0);
-		_unlock_tx_hashtbl(bond);
+		spin_unlock(&bond->mode_lock);
 	}

 	/* no suitable interface, frame not sent */
@@ -1595,13 +1544,6 @@ void bond_alb_monitor(struct work_struct *work)
 	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
 		bool strict_match;

-		/* change of curr_active_slave involves swapping of mac addresses.
-		 * in order to avoid this swapping from happening while
-		 * sending the learning packets, the curr_slave_lock must be held for
-		 * read.
-		 */
-		read_lock(&bond->curr_slave_lock);
-
 		bond_for_each_slave_rcu(bond, slave, iter) {
 			/* If updating current_active, use all currently
 			 * user mac addreses (!strict_match).  Otherwise, only
@@ -1613,17 +1555,11 @@ void bond_alb_monitor(struct work_struct *work)
 			alb_send_learning_packets(slave, slave->dev->dev_addr,
 						  strict_match);
 		}
-
-		read_unlock(&bond->curr_slave_lock);
-
 		bond_info->lp_counter = 0;
 	}

 	/* rebalance tx traffic */
 	if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
-
-		read_lock(&bond->curr_slave_lock);
-
 		bond_for_each_slave_rcu(bond, slave, iter) {
 			tlb_clear_slave(bond, slave, 1);
 			if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1633,9 +1569,6 @@ void bond_alb_monitor(struct work_struct *work)
 				bond_info->unbalanced_load = 0;
 			}
 		}
-
-		read_unlock(&bond->curr_slave_lock);
-
 		bond_info->tx_rebalance_counter = 0;
 	}

@@ -1739,7 +1672,6 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)

 }

-/* Caller must hold bond lock for read */
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -1775,21 +1707,14 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
  * Set the bond->curr_active_slave to @new_slave and handle
  * mac address swapping and promiscuity changes as needed.
  *
- * If new_slave is NULL, caller must hold curr_slave_lock for write
- *
- * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
- * for write. Processing here may sleep, so no other locks may be held.
+ * Caller must hold RTNL
 */
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
-	__releases(&bond->curr_slave_lock)
-	__acquires(&bond->curr_slave_lock)
 {
 	struct slave *swap_slave;
 	struct slave *curr_active;

-	curr_active = rcu_dereference_protected(bond->curr_active_slave,
-						!new_slave ||
-						lockdep_is_held(&bond->curr_slave_lock));
+	curr_active = rtnl_dereference(bond->curr_active_slave);
 	if (curr_active == new_slave)
 		return;

@@ -1820,10 +1745,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	tlb_clear_slave(bond, swap_slave, 1);
 	tlb_clear_slave(bond, new_slave, 1);

-	write_unlock_bh(&bond->curr_slave_lock);
-
-	ASSERT_RTNL();
-
 	/* in TLB mode, the slave might flip down/up with the old dev_addr,
 	 * and thus filter bond->dev_addr's packets, so force bond's mac
 	 */
@@ -1852,8 +1773,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
 					  false);
 	}
-
-	write_lock_bh(&bond->curr_slave_lock);
 }

 /* Called with RTNL */
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index aaeac61d03cf..3c6a7ff974d7 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -147,7 +147,6 @@ struct tlb_up_slave {

 struct alb_bond_info {
 	struct tlb_client_info	*tx_hashtbl; /* Dynamically allocated */
-	spinlock_t		tx_hashtbl_lock;
 	u32			unbalanced_load;
 	int			tx_rebalance_counter;
 	int			lp_counter;
@@ -156,7 +155,6 @@ struct alb_bond_info {
 	/* -------- rlb parameters -------- */
 	int rlb_enabled;
 	struct rlb_client_info	*rx_hashtbl;	/* Receive hash table */
-	spinlock_t		rx_hashtbl_lock;
 	u32			rx_hashtbl_used_head;
 	u8			rx_ntt;	/* flag - need to transmit
 					 * to all rx clients
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 280971b227ea..652f6c5d1bf7 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -29,7 +29,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 	seq_printf(m, "SourceIP DestinationIP "
 		      "Destination MAC DEV\n");

-	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+	spin_lock_bh(&bond->mode_lock);

 	hash_index = bond_info->rx_hashtbl_used_head;
 	for (; hash_index != RLB_NULL_INDEX;
@@ -42,7 +42,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 			   client_info->slave->dev->name);
 	}

-	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+	spin_unlock_bh(&bond->mode_lock);

 	return 0;
 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b43b2df9e5d1..2d90a8b7f62e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -637,13 +637,11 @@ static void bond_set_dev_addr(struct net_device *bond_dev, | |||
637 | * | 637 | * |
638 | * Perform special MAC address swapping for fail_over_mac settings | 638 | * Perform special MAC address swapping for fail_over_mac settings |
639 | * | 639 | * |
640 | * Called with RTNL, curr_slave_lock for write_bh. | 640 | * Called with RTNL |
641 | */ | 641 | */ |
642 | static void bond_do_fail_over_mac(struct bonding *bond, | 642 | static void bond_do_fail_over_mac(struct bonding *bond, |
643 | struct slave *new_active, | 643 | struct slave *new_active, |
644 | struct slave *old_active) | 644 | struct slave *old_active) |
645 | __releases(&bond->curr_slave_lock) | ||
646 | __acquires(&bond->curr_slave_lock) | ||
647 | { | 645 | { |
648 | u8 tmp_mac[ETH_ALEN]; | 646 | u8 tmp_mac[ETH_ALEN]; |
649 | struct sockaddr saddr; | 647 | struct sockaddr saddr; |
@@ -651,11 +649,8 @@ static void bond_do_fail_over_mac(struct bonding *bond, | |||
651 | 649 | ||
652 | switch (bond->params.fail_over_mac) { | 650 | switch (bond->params.fail_over_mac) { |
653 | case BOND_FOM_ACTIVE: | 651 | case BOND_FOM_ACTIVE: |
654 | if (new_active) { | 652 | if (new_active) |
655 | write_unlock_bh(&bond->curr_slave_lock); | ||
656 | bond_set_dev_addr(bond->dev, new_active->dev); | 653 | bond_set_dev_addr(bond->dev, new_active->dev); |
657 | write_lock_bh(&bond->curr_slave_lock); | ||
658 | } | ||
659 | break; | 654 | break; |
660 | case BOND_FOM_FOLLOW: | 655 | case BOND_FOM_FOLLOW: |
661 | /* | 656 | /* |
@@ -666,8 +661,6 @@ static void bond_do_fail_over_mac(struct bonding *bond, | |||
666 | if (!new_active) | 661 | if (!new_active) |
667 | return; | 662 | return; |
668 | 663 | ||
669 | write_unlock_bh(&bond->curr_slave_lock); | ||
670 | |||
671 | if (old_active) { | 664 | if (old_active) { |
672 | ether_addr_copy(tmp_mac, new_active->dev->dev_addr); | 665 | ether_addr_copy(tmp_mac, new_active->dev->dev_addr); |
673 | ether_addr_copy(saddr.sa_data, | 666 | ether_addr_copy(saddr.sa_data, |
@@ -696,7 +689,6 @@ static void bond_do_fail_over_mac(struct bonding *bond, | |||
696 | netdev_err(bond->dev, "Error %d setting MAC of slave %s\n", | 689 | netdev_err(bond->dev, "Error %d setting MAC of slave %s\n", |
697 | -rv, new_active->dev->name); | 690 | -rv, new_active->dev->name); |
698 | out: | 691 | out: |
699 | write_lock_bh(&bond->curr_slave_lock); | ||
700 | break; | 692 | break; |
701 | default: | 693 | default: |
702 | netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n", | 694 | netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n", |
@@ -709,7 +701,7 @@ out: | |||
709 | static bool bond_should_change_active(struct bonding *bond) | 701 | static bool bond_should_change_active(struct bonding *bond) |
710 | { | 702 | { |
711 | struct slave *prim = rtnl_dereference(bond->primary_slave); | 703 | struct slave *prim = rtnl_dereference(bond->primary_slave); |
712 | struct slave *curr = bond_deref_active_protected(bond); | 704 | struct slave *curr = rtnl_dereference(bond->curr_active_slave); |
713 | 705 | ||
714 | if (!prim || !curr || curr->link != BOND_LINK_UP) | 706 | if (!prim || !curr || curr->link != BOND_LINK_UP) |
715 | return true; | 707 | return true; |
@@ -785,15 +777,15 @@ static bool bond_should_notify_peers(struct bonding *bond) | |||
785 | * because it is apparently the best available slave we have, even though its | 777 | * because it is apparently the best available slave we have, even though its |
786 | * updelay hasn't timed out yet. | 778 | * updelay hasn't timed out yet. |
787 | * | 779 | * |
788 | * If new_active is not NULL, caller must hold curr_slave_lock for write_bh. | 780 | * Caller must hold RTNL. |
789 | */ | 781 | */ |
790 | void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | 782 | void bond_change_active_slave(struct bonding *bond, struct slave *new_active) |
791 | { | 783 | { |
792 | struct slave *old_active; | 784 | struct slave *old_active; |
793 | 785 | ||
794 | old_active = rcu_dereference_protected(bond->curr_active_slave, | 786 | ASSERT_RTNL(); |
795 | !new_active || | 787 | |
796 | lockdep_is_held(&bond->curr_slave_lock)); | 788 | old_active = rtnl_dereference(bond->curr_active_slave); |
797 | 789 | ||
798 | if (old_active == new_active) | 790 | if (old_active == new_active) |
799 | return; | 791 | return; |
@@ -861,14 +853,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
861 | bond_should_notify_peers(bond); | 853 | bond_should_notify_peers(bond); |
862 | } | 854 | } |
863 | 855 | ||
864 | write_unlock_bh(&bond->curr_slave_lock); | ||
865 | |||
866 | call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); | 856 | call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); |
867 | if (should_notify_peers) | 857 | if (should_notify_peers) |
868 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, | 858 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, |
869 | bond->dev); | 859 | bond->dev); |
870 | |||
871 | write_lock_bh(&bond->curr_slave_lock); | ||
872 | } | 860 | } |
873 | } | 861 | } |
874 | 862 | ||
@@ -893,7 +881,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
893 | * - The primary_slave has got its link back. | 881 | * - The primary_slave has got its link back. |
894 | * - A slave has got its link back and there's no old curr_active_slave. | 882 | * - A slave has got its link back and there's no old curr_active_slave. |
895 | * | 883 | * |
896 | * Caller must hold curr_slave_lock for write_bh. | 884 | * Caller must hold RTNL. |
897 | */ | 885 | */ |
898 | void bond_select_active_slave(struct bonding *bond) | 886 | void bond_select_active_slave(struct bonding *bond) |
899 | { | 887 | { |
@@ -901,7 +889,7 @@ void bond_select_active_slave(struct bonding *bond) | |||
901 | int rv; | 889 | int rv; |
902 | 890 | ||
903 | best_slave = bond_find_best_slave(bond); | 891 | best_slave = bond_find_best_slave(bond); |
904 | if (best_slave != bond_deref_active_protected(bond)) { | 892 | if (best_slave != rtnl_dereference(bond->curr_active_slave)) { |
905 | bond_change_active_slave(bond, best_slave); | 893 | bond_change_active_slave(bond, best_slave); |
906 | rv = bond_set_carrier(bond); | 894 | rv = bond_set_carrier(bond); |
907 | if (!rv) | 895 | if (!rv) |
@@ -1571,9 +1559,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1571 | 1559 | ||
1572 | if (bond_uses_primary(bond)) { | 1560 | if (bond_uses_primary(bond)) { |
1573 | block_netpoll_tx(); | 1561 | block_netpoll_tx(); |
1574 | write_lock_bh(&bond->curr_slave_lock); | ||
1575 | bond_select_active_slave(bond); | 1562 | bond_select_active_slave(bond); |
1576 | write_unlock_bh(&bond->curr_slave_lock); | ||
1577 | unblock_netpoll_tx(); | 1563 | unblock_netpoll_tx(); |
1578 | } | 1564 | } |
1579 | 1565 | ||
@@ -1601,10 +1587,8 @@ err_detach: | |||
1601 | RCU_INIT_POINTER(bond->primary_slave, NULL); | 1587 | RCU_INIT_POINTER(bond->primary_slave, NULL); |
1602 | if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { | 1588 | if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { |
1603 | block_netpoll_tx(); | 1589 | block_netpoll_tx(); |
1604 | write_lock_bh(&bond->curr_slave_lock); | ||
1605 | bond_change_active_slave(bond, NULL); | 1590 | bond_change_active_slave(bond, NULL); |
1606 | bond_select_active_slave(bond); | 1591 | bond_select_active_slave(bond); |
1607 | write_unlock_bh(&bond->curr_slave_lock); | ||
1608 | unblock_netpoll_tx(); | 1592 | unblock_netpoll_tx(); |
1609 | } | 1593 | } |
1610 | /* either primary_slave or curr_active_slave might've changed */ | 1594 | /* either primary_slave or curr_active_slave might've changed */ |
@@ -1645,7 +1629,7 @@ err_undo_flags: | |||
1645 | /* | 1629 | /* |
1646 | * Try to release the slave device <slave> from the bond device <master> | 1630 | * Try to release the slave device <slave> from the bond device <master> |
1647 | * It is legal to access curr_active_slave without a lock because all the function | 1631 | * It is legal to access curr_active_slave without a lock because all the function |
1648 | * is write-locked. If "all" is true it means that the function is being called | 1632 | * is RTNL-locked. If "all" is true it means that the function is being called |
1649 | * while destroying a bond interface and all slaves are being released. | 1633 | * while destroying a bond interface and all slaves are being released. |
1650 | * | 1634 | * |
1651 | * The rules for slave state should be: | 1635 | * The rules for slave state should be: |
@@ -1691,14 +1675,8 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1691 | */ | 1675 | */ |
1692 | netdev_rx_handler_unregister(slave_dev); | 1676 | netdev_rx_handler_unregister(slave_dev); |
1693 | 1677 | ||
1694 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { | 1678 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
1695 | /* Sync against bond_3ad_rx_indication and | ||
1696 | * bond_3ad_state_machine_handler | ||
1697 | */ | ||
1698 | write_lock_bh(&bond->curr_slave_lock); | ||
1699 | bond_3ad_unbind_slave(slave); | 1679 | bond_3ad_unbind_slave(slave); |
1700 | write_unlock_bh(&bond->curr_slave_lock); | ||
1701 | } | ||
1702 | 1680 | ||
1703 | netdev_info(bond_dev, "Releasing %s interface %s\n", | 1681 | netdev_info(bond_dev, "Releasing %s interface %s\n", |
1704 | bond_is_active_slave(slave) ? "active" : "backup", | 1682 | bond_is_active_slave(slave) ? "active" : "backup", |
@@ -1720,11 +1698,8 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1720 | if (rtnl_dereference(bond->primary_slave) == slave) | 1698 | if (rtnl_dereference(bond->primary_slave) == slave) |
1721 | RCU_INIT_POINTER(bond->primary_slave, NULL); | 1699 | RCU_INIT_POINTER(bond->primary_slave, NULL); |
1722 | 1700 | ||
1723 | if (oldcurrent == slave) { | 1701 | if (oldcurrent == slave) |
1724 | write_lock_bh(&bond->curr_slave_lock); | ||
1725 | bond_change_active_slave(bond, NULL); | 1702 | bond_change_active_slave(bond, NULL); |
1726 | write_unlock_bh(&bond->curr_slave_lock); | ||
1727 | } | ||
1728 | 1703 | ||
1729 | if (bond_is_lb(bond)) { | 1704 | if (bond_is_lb(bond)) { |
1730 | /* Must be called only after the slave has been | 1705 | /* Must be called only after the slave has been |
@@ -1743,11 +1718,7 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1743 | * is no concern that another slave add/remove event | 1718 | * is no concern that another slave add/remove event |
1744 | * will interfere. | 1719 | * will interfere. |
1745 | */ | 1720 | */ |
1746 | write_lock_bh(&bond->curr_slave_lock); | ||
1747 | |||
1748 | bond_select_active_slave(bond); | 1721 | bond_select_active_slave(bond); |
1749 | |||
1750 | write_unlock_bh(&bond->curr_slave_lock); | ||
1751 | } | 1722 | } |
1752 | 1723 | ||
1753 | if (!bond_has_slaves(bond)) { | 1724 | if (!bond_has_slaves(bond)) { |
@@ -2058,9 +2029,7 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2058 | do_failover: | 2029 | do_failover: |
2059 | ASSERT_RTNL(); | 2030 | ASSERT_RTNL(); |
2060 | block_netpoll_tx(); | 2031 | block_netpoll_tx(); |
2061 | write_lock_bh(&bond->curr_slave_lock); | ||
2062 | bond_select_active_slave(bond); | 2032 | bond_select_active_slave(bond); |
2063 | write_unlock_bh(&bond->curr_slave_lock); | ||
2064 | unblock_netpoll_tx(); | 2033 | unblock_netpoll_tx(); |
2065 | } | 2034 | } |
2066 | 2035 | ||
@@ -2506,15 +2475,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2506 | if (slave_state_changed) { | 2475 | if (slave_state_changed) { |
2507 | bond_slave_state_change(bond); | 2476 | bond_slave_state_change(bond); |
2508 | } else if (do_failover) { | 2477 | } else if (do_failover) { |
2509 | /* the bond_select_active_slave must hold RTNL | ||
2510 | * and curr_slave_lock for write. | ||
2511 | */ | ||
2512 | block_netpoll_tx(); | 2478 | block_netpoll_tx(); |
2513 | write_lock_bh(&bond->curr_slave_lock); | ||
2514 | |||
2515 | bond_select_active_slave(bond); | 2479 | bond_select_active_slave(bond); |
2516 | |||
2517 | write_unlock_bh(&bond->curr_slave_lock); | ||
2518 | unblock_netpoll_tx(); | 2480 | unblock_netpoll_tx(); |
2519 | } | 2481 | } |
2520 | rtnl_unlock(); | 2482 | rtnl_unlock(); |
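The deleted comment above documented the old rule (RTNL plus curr_slave_lock held for write); the new rule, visible in every do_failover-style hunk in this file, is RTNL plus block_netpoll_tx() only. Condensed into one illustrative helper (the helper name is made up, the call sequence is taken from the diff):

    static void example_do_failover(struct bonding *bond)
    {
            ASSERT_RTNL();

            block_netpoll_tx();
            bond_select_active_slave(bond);   /* no curr_slave_lock anymore */
            unblock_netpoll_tx();
    }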
@@ -2532,7 +2494,7 @@ re_arm: | |||
2532 | * place for the slave. Returns 0 if no changes are found, >0 if changes | 2494 | * place for the slave. Returns 0 if no changes are found, >0 if changes |
2533 | * to link states must be committed. | 2495 | * to link states must be committed. |
2534 | * | 2496 | * |
2535 | * Called with rcu_read_lock hold. | 2497 | * Called with rcu_read_lock held. |
2536 | */ | 2498 | */ |
2537 | static int bond_ab_arp_inspect(struct bonding *bond) | 2499 | static int bond_ab_arp_inspect(struct bonding *bond) |
2538 | { | 2500 | { |
@@ -2670,9 +2632,7 @@ static void bond_ab_arp_commit(struct bonding *bond) | |||
2670 | do_failover: | 2632 | do_failover: |
2671 | ASSERT_RTNL(); | 2633 | ASSERT_RTNL(); |
2672 | block_netpoll_tx(); | 2634 | block_netpoll_tx(); |
2673 | write_lock_bh(&bond->curr_slave_lock); | ||
2674 | bond_select_active_slave(bond); | 2635 | bond_select_active_slave(bond); |
2675 | write_unlock_bh(&bond->curr_slave_lock); | ||
2676 | unblock_netpoll_tx(); | 2636 | unblock_netpoll_tx(); |
2677 | } | 2637 | } |
2678 | 2638 | ||
@@ -2682,7 +2642,7 @@ do_failover: | |||
2682 | /* | 2642 | /* |
2683 | * Send ARP probes for active-backup mode ARP monitor. | 2643 | * Send ARP probes for active-backup mode ARP monitor. |
2684 | * | 2644 | * |
2685 | * Called with rcu_read_lock hold. | 2645 | * Called with rcu_read_lock held. |
2686 | */ | 2646 | */ |
2687 | static bool bond_ab_arp_probe(struct bonding *bond) | 2647 | static bool bond_ab_arp_probe(struct bonding *bond) |
2688 | { | 2648 | { |
@@ -2939,9 +2899,7 @@ static int bond_slave_netdev_event(unsigned long event, | |||
2939 | primary ? slave_dev->name : "none"); | 2899 | primary ? slave_dev->name : "none"); |
2940 | 2900 | ||
2941 | block_netpoll_tx(); | 2901 | block_netpoll_tx(); |
2942 | write_lock_bh(&bond->curr_slave_lock); | ||
2943 | bond_select_active_slave(bond); | 2902 | bond_select_active_slave(bond); |
2944 | write_unlock_bh(&bond->curr_slave_lock); | ||
2945 | unblock_netpoll_tx(); | 2903 | unblock_netpoll_tx(); |
2946 | break; | 2904 | break; |
2947 | case NETDEV_FEAT_CHANGE: | 2905 | case NETDEV_FEAT_CHANGE: |
@@ -3106,7 +3064,6 @@ static int bond_open(struct net_device *bond_dev) | |||
3106 | 3064 | ||
3107 | /* reset slave->backup and slave->inactive */ | 3065 | /* reset slave->backup and slave->inactive */ |
3108 | if (bond_has_slaves(bond)) { | 3066 | if (bond_has_slaves(bond)) { |
3109 | read_lock(&bond->curr_slave_lock); | ||
3110 | bond_for_each_slave(bond, slave, iter) { | 3067 | bond_for_each_slave(bond, slave, iter) { |
3111 | if (bond_uses_primary(bond) && | 3068 | if (bond_uses_primary(bond) && |
3112 | slave != rcu_access_pointer(bond->curr_active_slave)) { | 3069 | slave != rcu_access_pointer(bond->curr_active_slave)) { |
@@ -3117,7 +3074,6 @@ static int bond_open(struct net_device *bond_dev) | |||
3117 | BOND_SLAVE_NOTIFY_NOW); | 3074 | BOND_SLAVE_NOTIFY_NOW); |
3118 | } | 3075 | } |
3119 | } | 3076 | } |
3120 | read_unlock(&bond->curr_slave_lock); | ||
3121 | } | 3077 | } |
3122 | 3078 | ||
3123 | bond_work_init_all(bond); | 3079 | bond_work_init_all(bond); |
@@ -3239,14 +3195,10 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd | |||
3239 | if (!mii) | 3195 | if (!mii) |
3240 | return -EINVAL; | 3196 | return -EINVAL; |
3241 | 3197 | ||
3242 | |||
3243 | if (mii->reg_num == 1) { | 3198 | if (mii->reg_num == 1) { |
3244 | mii->val_out = 0; | 3199 | mii->val_out = 0; |
3245 | read_lock(&bond->curr_slave_lock); | ||
3246 | if (netif_carrier_ok(bond->dev)) | 3200 | if (netif_carrier_ok(bond->dev)) |
3247 | mii->val_out = BMSR_LSTATUS; | 3201 | mii->val_out = BMSR_LSTATUS; |
3248 | |||
3249 | read_unlock(&bond->curr_slave_lock); | ||
3250 | } | 3202 | } |
3251 | 3203 | ||
3252 | return 0; | 3204 | return 0; |
@@ -3892,8 +3844,7 @@ void bond_setup(struct net_device *bond_dev) | |||
3892 | { | 3844 | { |
3893 | struct bonding *bond = netdev_priv(bond_dev); | 3845 | struct bonding *bond = netdev_priv(bond_dev); |
3894 | 3846 | ||
3895 | /* initialize rwlocks */ | 3847 | spin_lock_init(&bond->mode_lock); |
3896 | rwlock_init(&bond->curr_slave_lock); | ||
3897 | bond->params = bonding_defaults; | 3848 | bond->params = bonding_defaults; |
3898 | 3849 | ||
3899 | /* Initialize pointers */ | 3850 | /* Initialize pointers */ |
@@ -4340,19 +4291,9 @@ static int bond_init(struct net_device *bond_dev) | |||
4340 | { | 4291 | { |
4341 | struct bonding *bond = netdev_priv(bond_dev); | 4292 | struct bonding *bond = netdev_priv(bond_dev); |
4342 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); | 4293 | struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); |
4343 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | ||
4344 | 4294 | ||
4345 | netdev_dbg(bond_dev, "Begin bond_init\n"); | 4295 | netdev_dbg(bond_dev, "Begin bond_init\n"); |
4346 | 4296 | ||
4347 | /* | ||
4348 | * Initialize locks that may be required during | ||
4349 | * en/deslave operations. All of the bond_open work | ||
4350 | * (of which this is part) should really be moved to | ||
4351 | * a phase prior to dev_open | ||
4352 | */ | ||
4353 | spin_lock_init(&(bond_info->tx_hashtbl_lock)); | ||
4354 | spin_lock_init(&(bond_info->rx_hashtbl_lock)); | ||
4355 | |||
4356 | bond->wq = create_singlethread_workqueue(bond_dev->name); | 4297 | bond->wq = create_singlethread_workqueue(bond_dev->name); |
4357 | if (!bond->wq) | 4298 | if (!bond->wq) |
4358 | return -ENOMEM; | 4299 | return -ENOMEM; |
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 534c0600484e..b62697f4a3de 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c | |||
@@ -734,15 +734,13 @@ static int bond_option_active_slave_set(struct bonding *bond, | |||
734 | } | 734 | } |
735 | 735 | ||
736 | block_netpoll_tx(); | 736 | block_netpoll_tx(); |
737 | write_lock_bh(&bond->curr_slave_lock); | ||
738 | |||
739 | /* check to see if we are clearing active */ | 737 | /* check to see if we are clearing active */ |
740 | if (!slave_dev) { | 738 | if (!slave_dev) { |
741 | netdev_info(bond->dev, "Clearing current active slave\n"); | 739 | netdev_info(bond->dev, "Clearing current active slave\n"); |
742 | RCU_INIT_POINTER(bond->curr_active_slave, NULL); | 740 | RCU_INIT_POINTER(bond->curr_active_slave, NULL); |
743 | bond_select_active_slave(bond); | 741 | bond_select_active_slave(bond); |
744 | } else { | 742 | } else { |
745 | struct slave *old_active = bond_deref_active_protected(bond); | 743 | struct slave *old_active = rtnl_dereference(bond->curr_active_slave); |
746 | struct slave *new_active = bond_slave_get_rtnl(slave_dev); | 744 | struct slave *new_active = bond_slave_get_rtnl(slave_dev); |
747 | 745 | ||
748 | BUG_ON(!new_active); | 746 | BUG_ON(!new_active); |
@@ -765,8 +763,6 @@ static int bond_option_active_slave_set(struct bonding *bond, | |||
765 | } | 763 | } |
766 | } | 764 | } |
767 | } | 765 | } |
768 | |||
769 | write_unlock_bh(&bond->curr_slave_lock); | ||
770 | unblock_netpoll_tx(); | 766 | unblock_netpoll_tx(); |
771 | 767 | ||
772 | return ret; | 768 | return ret; |
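The interesting line in this hunk is the old_active assignment: bond_deref_active_protected() was an rcu_dereference_protected() whose lockdep condition was curr_slave_lock; with that lock gone, rtnl_dereference() is the natural replacement, since its lockdep condition is RTNL, which this option handler already holds. Side by side, with the "before" form taken from the macro removed at the end of bonding.h below and the "after" form as in this hunk:

    /* before: dereference checked against curr_slave_lock via lockdep */
    old_active = rcu_dereference_protected(bond->curr_active_slave,
                                           lockdep_is_held(&bond->curr_slave_lock));

    /* after: same dereference, lockdep now checks that RTNL is held */
    old_active = rtnl_dereference(bond->curr_active_slave);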
@@ -1066,7 +1062,6 @@ static int bond_option_primary_set(struct bonding *bond, | |||
1066 | struct slave *slave; | 1062 | struct slave *slave; |
1067 | 1063 | ||
1068 | block_netpoll_tx(); | 1064 | block_netpoll_tx(); |
1069 | write_lock_bh(&bond->curr_slave_lock); | ||
1070 | 1065 | ||
1071 | p = strchr(primary, '\n'); | 1066 | p = strchr(primary, '\n'); |
1072 | if (p) | 1067 | if (p) |
@@ -1103,7 +1098,6 @@ static int bond_option_primary_set(struct bonding *bond, | |||
1103 | primary, bond->dev->name); | 1098 | primary, bond->dev->name); |
1104 | 1099 | ||
1105 | out: | 1100 | out: |
1106 | write_unlock_bh(&bond->curr_slave_lock); | ||
1107 | unblock_netpoll_tx(); | 1101 | unblock_netpoll_tx(); |
1108 | 1102 | ||
1109 | return 0; | 1103 | return 0; |
@@ -1117,9 +1111,7 @@ static int bond_option_primary_reselect_set(struct bonding *bond, | |||
1117 | bond->params.primary_reselect = newval->value; | 1111 | bond->params.primary_reselect = newval->value; |
1118 | 1112 | ||
1119 | block_netpoll_tx(); | 1113 | block_netpoll_tx(); |
1120 | write_lock_bh(&bond->curr_slave_lock); | ||
1121 | bond_select_active_slave(bond); | 1114 | bond_select_active_slave(bond); |
1122 | write_unlock_bh(&bond->curr_slave_lock); | ||
1123 | unblock_netpoll_tx(); | 1115 | unblock_netpoll_tx(); |
1124 | 1116 | ||
1125 | return 0; | 1117 | return 0; |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 78c461abaa09..3aff1a815e89 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -184,9 +184,7 @@ struct slave { | |||
184 | 184 | ||
185 | /* | 185 | /* |
186 | * Here are the locking policies for the two bonding locks: | 186 | * Here are the locking policies for the two bonding locks: |
187 | * | 187 | * Get rcu_read_lock when reading or RTNL when writing slave list. |
188 | * 1) Get rcu_read_lock when reading or RTNL when writing slave list. | ||
189 | * 2) Get bond->curr_slave_lock when reading/writing bond->curr_active_slave. | ||
190 | */ | 188 | */ |
191 | struct bonding { | 189 | struct bonding { |
192 | struct net_device *dev; /* first - useful for panic debug */ | 190 | struct net_device *dev; /* first - useful for panic debug */ |
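The slimmed-down policy comment leaves a single rule for the slave list: rcu_read_lock() on the read side, RTNL on the write side. A small reader-side sketch, assuming bonding.h for struct bonding, struct slave and the bond_for_each_slave_rcu() iterator; the function itself is illustrative:

    static int example_count_up_slaves(struct bonding *bond)
    {
            struct list_head *iter;
            struct slave *slave;
            int up = 0;

            /* Readers of the slave list only need an RCU read-side section;
             * enslave/release (the writers) run under RTNL.
             */
            rcu_read_lock();
            bond_for_each_slave_rcu(bond, slave, iter)
                    if (slave->link == BOND_LINK_UP)
                            up++;
            rcu_read_unlock();

            return up;
    }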
@@ -197,7 +195,13 @@ struct bonding { | |||
197 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ | 195 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ |
198 | int (*recv_probe)(const struct sk_buff *, struct bonding *, | 196 | int (*recv_probe)(const struct sk_buff *, struct bonding *, |
199 | struct slave *); | 197 | struct slave *); |
200 | rwlock_t curr_slave_lock; | 198 | /* mode_lock is used for mode-specific locking needs, currently used by: |
199 | * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and | ||
200 | * bond_3ad_state_machine_handler() concurrently. | ||
201 | * TLB mode (5) - to sync the use and modifications of its hash table | ||
202 | * ALB mode (6) - to sync the use and modifications of its hash table | ||
203 | */ | ||
204 | spinlock_t mode_lock; | ||
201 | u8 send_peer_notif; | 205 | u8 send_peer_notif; |
202 | u8 igmp_retrans; | 206 | u8 igmp_retrans; |
203 | #ifdef CONFIG_PROC_FS | 207 | #ifdef CONFIG_PROC_FS |
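The new comment enumerates the users of mode_lock; the pattern it implies is one bottom-half-safe spinlock per bond, taken around whatever per-mode state used to have its own lock (the ALB tx/rx hash tables, the 3ad state machines). A hedged sketch of an ALB-style update, assuming BOND_ALB_INFO() and tx_hashtbl from bond_alb.h; the helper name and index parameter are invented:

    static void example_alb_touch_entry(struct bonding *bond, u32 hash_index)
    {
            spin_lock_bh(&bond->mode_lock);
            /* ... read/modify BOND_ALB_INFO(bond)->tx_hashtbl[hash_index],
             * which used to be guarded by its own tx_hashtbl_lock ...
             */
            spin_unlock_bh(&bond->mode_lock);
    }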
@@ -227,10 +231,6 @@ struct bonding { | |||
227 | #define bond_slave_get_rtnl(dev) \ | 231 | #define bond_slave_get_rtnl(dev) \ |
228 | ((struct slave *) rtnl_dereference(dev->rx_handler_data)) | 232 | ((struct slave *) rtnl_dereference(dev->rx_handler_data)) |
229 | 233 | ||
230 | #define bond_deref_active_protected(bond) \ | ||
231 | rcu_dereference_protected(bond->curr_active_slave, \ | ||
232 | lockdep_is_held(&bond->curr_slave_lock)) | ||
233 | |||
234 | struct bond_vlan_tag { | 234 | struct bond_vlan_tag { |
235 | __be16 vlan_proto; | 235 | __be16 vlan_proto; |
236 | unsigned short vlan_id; | 236 | unsigned short vlan_id; |