aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2006-06-22 05:28:18 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-06-23 05:07:26 -0400
commitd4828d85d188dc70ed172802e798d3978bb6e29e (patch)
tree11e70b1c93b25c5e2c9a1547e23086fc9a76d287 /net
parent5e2707fa3aed8c24075087cbaea2628725adbe55 (diff)
[NET]: Prevent transmission after dev_deactivate
The dev_deactivate function has bit-rotted since the introduction of lockless drivers. In particular, the spin_unlock_wait call at the end has no effect on the xmit routine of lockless drivers.

With a little bit of work, we can make it much more useful by providing the guarantee that when it returns, no more calls to the xmit routine of the underlying driver will be made.

The idea is simple. There are two entry points in to the xmit routine. The first comes from dev_queue_xmit. That one is easily stopped by using synchronize_rcu. This works because we set the qdisc to noop_qdisc before the synchronize_rcu call. That in turn causes all subsequent packets sent to dev_queue_xmit to be dropped. The synchronize_rcu call also ensures all outstanding calls leave their critical section.

The other entry point is from qdisc_run. Since we now have a bit that indicates whether it's running, all we have to do is to wait until the bit is off.

I've removed the loop to wait for __LINK_STATE_SCHED to clear. This is useless because netif_wake_queue can cause it to be set again. It is also harmless because we've disarmed qdisc_run.

I've also removed the spin_unlock_wait on xmit_lock, because its only purpose — making sure that all outstanding xmit_lock holders have exited — is already served by dev_watchdog_down.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/dev.c6
-rw-r--r--net/sched/sch_generic.c12
2 files changed, 12 insertions, 6 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ab39fe17cb58..29e3888102bc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1295,7 +1295,7 @@ int dev_queue_xmit(struct sk_buff *skb)
1295 /* Disable soft irqs for various locks below. Also 1295 /* Disable soft irqs for various locks below. Also
1296 * stops preemption for RCU. 1296 * stops preemption for RCU.
1297 */ 1297 */
1298 local_bh_disable(); 1298 rcu_read_lock_bh();
1299 1299
1300 /* Updates of qdisc are serialized by queue_lock. 1300 /* Updates of qdisc are serialized by queue_lock.
1301 * The struct Qdisc which is pointed to by qdisc is now a 1301 * The struct Qdisc which is pointed to by qdisc is now a
@@ -1369,13 +1369,13 @@ int dev_queue_xmit(struct sk_buff *skb)
1369 } 1369 }
1370 1370
1371 rc = -ENETDOWN; 1371 rc = -ENETDOWN;
1372 local_bh_enable(); 1372 rcu_read_unlock_bh();
1373 1373
1374out_kfree_skb: 1374out_kfree_skb:
1375 kfree_skb(skb); 1375 kfree_skb(skb);
1376 return rc; 1376 return rc;
1377out: 1377out:
1378 local_bh_enable(); 1378 rcu_read_unlock_bh();
1379 return rc; 1379 return rc;
1380} 1380}
1381 1381
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d7aca8ef524a..7aad0121232c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -181,9 +181,13 @@ requeue:
181 181
182void __qdisc_run(struct net_device *dev) 182void __qdisc_run(struct net_device *dev)
183{ 183{
184 if (unlikely(dev->qdisc == &noop_qdisc))
185 goto out;
186
184 while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev)) 187 while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
185 /* NOTHING */; 188 /* NOTHING */;
186 189
190out:
187 clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); 191 clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
188} 192}
189 193
@@ -583,10 +587,12 @@ void dev_deactivate(struct net_device *dev)
583 587
584 dev_watchdog_down(dev); 588 dev_watchdog_down(dev);
585 589
586 while (test_bit(__LINK_STATE_SCHED, &dev->state)) 590 /* Wait for outstanding dev_queue_xmit calls. */
587 yield(); 591 synchronize_rcu();
588 592
589 spin_unlock_wait(&dev->_xmit_lock); 593 /* Wait for outstanding qdisc_run calls. */
594 while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
595 yield();
590} 596}
591 597
592void dev_init_scheduler(struct net_device *dev) 598void dev_init_scheduler(struct net_device *dev)