author     David S. Miller <davem@davemloft.net>   2008-07-31 19:58:50 -0400
committer  David S. Miller <davem@davemloft.net>   2008-07-31 19:58:50 -0400
commit     c3f26a269c2421f97f10cf8ed05d5099b573af4d
tree       d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae   /drivers/net/ifb.c
parent     967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4
netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the
netif_tx_lock() routines were converted to iterate over
all TX queues and grab each queue's spinlock.

This causes heartburn for lockdep, and it's not a healthy
thing to do with lots of TX queues anyway.

So modify this to use a top-level lock and a "frozen"
state for the individual TX queues.
Signed-off-by: David S. Miller <davem@davemloft.net>
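The "frozen" scheme itself lives in include/linux/netdevice.h, which is outside this diffstat. As a rough sketch of what the commit message describes (the tx_global_lock field and __QUEUE_STATE_FROZEN bit follow that description; treat this as illustrative rather than a verbatim copy of the commit):

static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;

	/* One top-level lock instead of num_tx_queues nested queue locks. */
	spin_lock(&dev->tx_global_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Take each queue's lock just long enough to set the
		 * frozen bit, synchronizing with transmitters already
		 * inside ->hard_start_xmit(); lockdep never sees more
		 * than one queue lock held at a time.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

Transmit paths then check the frozen bit under their own queue lock, which gives netif_tx_lock() the same exclusion as holding every queue's spinlock, without the nested-lock pattern.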
Diffstat (limited to 'drivers/net/ifb.c')
-rw-r--r--  drivers/net/ifb.c | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 0960e69b2da4..e4fbefc8c82f 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -69,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
 	struct net_device *_dev = (struct net_device *)dev;
 	struct ifb_private *dp = netdev_priv(_dev);
 	struct net_device_stats *stats = &_dev->stats;
+	struct netdev_queue *txq;
 	struct sk_buff *skb;
 
+	txq = netdev_get_tx_queue(_dev, 0);
 	dp->st_task_enter++;
 	if ((skb = skb_peek(&dp->tq)) == NULL) {
 		dp->st_txq_refl_try++;
-		if (netif_tx_trylock(_dev)) {
+		if (__netif_tx_trylock(txq)) {
 			dp->st_rxq_enter++;
 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
 				skb_queue_tail(&dp->tq, skb);
 				dp->st_rx2tx_tran++;
 			}
-			netif_tx_unlock(_dev);
+			__netif_tx_unlock(txq);
 		} else {
 			/* reschedule */
 			dp->st_rxq_notenter++;
@@ -115,7 +117,7 @@ static void ri_tasklet(unsigned long dev)
 		BUG();
 	}
 
-	if (netif_tx_trylock(_dev)) {
+	if (__netif_tx_trylock(txq)) {
 		dp->st_rxq_check++;
 		if ((skb = skb_peek(&dp->rq)) == NULL) {
 			dp->tasklet_pending = 0;
@@ -123,10 +125,10 @@ static void ri_tasklet(unsigned long dev)
 				netif_wake_queue(_dev);
 		} else {
 			dp->st_rxq_rsch++;
-			netif_tx_unlock(_dev);
+			__netif_tx_unlock(txq);
 			goto resched;
 		}
-		netif_tx_unlock(_dev);
+		__netif_tx_unlock(txq);
 	} else {
 resched:
 		dp->tasklet_pending = 1;
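ifb registers a single TX queue, so the patch resolves netdev_get_tx_queue(_dev, 0) once and locks only that queue. The per-queue helpers it switches to reduce, in sketch form, to a plain trylock/unlock on the queue's _xmit_lock (owner bookkeeping omitted; assumes the 2.6.27-era struct netdev_queue layout):

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	/* Lock one queue's xmit lock, not the whole device. */
	return spin_trylock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	spin_unlock(&txq->_xmit_lock);
}

Because only one queue lock is ever held here, the lock nesting that the device-wide netif_tx_trylock(_dev) produced no longer occurs in ifb's tasklet.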