about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2006-06-09 15:20:56 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-06-18 00:30:14 -0400
commit932ff279a43ab7257942cddff2595acd541cc49b (patch)
treee60130673a20d71becdac858c2589d8dfbf3ae1f /net/core
parentbf0857ea32addb6bc8b46383604b218b8ec09f19 (diff)
[NET]: Add netif_tx_lock
Various drivers use xmit_lock internally to synchronise with their transmission routines. They do so without setting xmit_lock_owner. This is fine as long as netpoll is not in use. With netpoll it is possible for deadlocks to occur if xmit_lock_owner isn't set. This is because a printk that occurs while xmit_lock is held and xmit_lock_owner is not set can cause netpoll to attempt to take xmit_lock recursively. While it is possible to resolve this by getting netpoll to use trylock, it is suboptimal because netpoll's sole objective is to maximise the chance of getting the printk out on the wire. So delaying or dropping the message is to be avoided as much as possible. So the only alternative is to always set xmit_lock_owner. The following patch does this by introducing the netif_tx_lock family of functions that take care of setting/unsetting xmit_lock_owner. I renamed xmit_lock to _xmit_lock to indicate that it should not be used directly. I didn't provide irq versions of the netif_tx_lock functions since xmit_lock is meant to be a BH-disabling lock. This is pretty much a straight text substitution except for a small bug fix in winbond. It currently uses netif_stop_queue/spin_unlock_wait to stop transmission. This is unsafe as an IRQ can potentially wake up the queue. So it is safer to use netif_tx_disable. The hamradio bits used spin_lock_irq but it is unnecessary as xmit_lock must never be taken in an IRQ handler. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/dev_mcast.c28
-rw-r--r--net/core/netpoll.c9
-rw-r--r--net/core/pktgen.c4
4 files changed, 24 insertions, 29 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bfa78c66c25..1b09f1cae46e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1282,15 +1282,13 @@ int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
1282 1282
1283#define HARD_TX_LOCK(dev, cpu) { \ 1283#define HARD_TX_LOCK(dev, cpu) { \
1284 if ((dev->features & NETIF_F_LLTX) == 0) { \ 1284 if ((dev->features & NETIF_F_LLTX) == 0) { \
1285 spin_lock(&dev->xmit_lock); \ 1285 netif_tx_lock(dev); \
1286 dev->xmit_lock_owner = cpu; \
1287 } \ 1286 } \
1288} 1287}
1289 1288
1290#define HARD_TX_UNLOCK(dev) { \ 1289#define HARD_TX_UNLOCK(dev) { \
1291 if ((dev->features & NETIF_F_LLTX) == 0) { \ 1290 if ((dev->features & NETIF_F_LLTX) == 0) { \
1292 dev->xmit_lock_owner = -1; \ 1291 netif_tx_unlock(dev); \
1293 spin_unlock(&dev->xmit_lock); \
1294 } \ 1292 } \
1295} 1293}
1296 1294
@@ -1389,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
1389 /* The device has no queue. Common case for software devices: 1387 /* The device has no queue. Common case for software devices:
1390 loopback, all the sorts of tunnels... 1388 loopback, all the sorts of tunnels...
1391 1389
1392 Really, it is unlikely that xmit_lock protection is necessary here. 1390 Really, it is unlikely that netif_tx_lock protection is necessary
1393 (f.e. loopback and IP tunnels are clean ignoring statistics 1391 here. (f.e. loopback and IP tunnels are clean ignoring statistics
1394 counters.) 1392 counters.)
1395 However, it is possible, that they rely on protection 1393 However, it is possible, that they rely on protection
1396 made by us here. 1394 made by us here.
@@ -2805,7 +2803,7 @@ int register_netdevice(struct net_device *dev)
2805 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 2803 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2806 2804
2807 spin_lock_init(&dev->queue_lock); 2805 spin_lock_init(&dev->queue_lock);
2808 spin_lock_init(&dev->xmit_lock); 2806 spin_lock_init(&dev->_xmit_lock);
2809 dev->xmit_lock_owner = -1; 2807 dev->xmit_lock_owner = -1;
2810#ifdef CONFIG_NET_CLS_ACT 2808#ifdef CONFIG_NET_CLS_ACT
2811 spin_lock_init(&dev->ingress_lock); 2809 spin_lock_init(&dev->ingress_lock);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 05d60850840e..c57d887da2ef 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -62,7 +62,7 @@
62 * Device mc lists are changed by bh at least if IPv6 is enabled, 62 * Device mc lists are changed by bh at least if IPv6 is enabled,
63 * so that it must be bh protected. 63 * so that it must be bh protected.
64 * 64 *
65 * We block accesses to device mc filters with dev->xmit_lock. 65 * We block accesses to device mc filters with netif_tx_lock.
66 */ 66 */
67 67
68/* 68/*
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
93 93
94void dev_mc_upload(struct net_device *dev) 94void dev_mc_upload(struct net_device *dev)
95{ 95{
96 spin_lock_bh(&dev->xmit_lock); 96 netif_tx_lock_bh(dev);
97 __dev_mc_upload(dev); 97 __dev_mc_upload(dev);
98 spin_unlock_bh(&dev->xmit_lock); 98 netif_tx_unlock_bh(dev);
99} 99}
100 100
101/* 101/*
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
107 int err = 0; 107 int err = 0;
108 struct dev_mc_list *dmi, **dmip; 108 struct dev_mc_list *dmi, **dmip;
109 109
110 spin_lock_bh(&dev->xmit_lock); 110 netif_tx_lock_bh(dev);
111 111
112 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) { 112 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
113 /* 113 /*
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
139 */ 139 */
140 __dev_mc_upload(dev); 140 __dev_mc_upload(dev);
141 141
142 spin_unlock_bh(&dev->xmit_lock); 142 netif_tx_unlock_bh(dev);
143 return 0; 143 return 0;
144 } 144 }
145 } 145 }
146 err = -ENOENT; 146 err = -ENOENT;
147done: 147done:
148 spin_unlock_bh(&dev->xmit_lock); 148 netif_tx_unlock_bh(dev);
149 return err; 149 return err;
150} 150}
151 151
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
160 160
161 dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC); 161 dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
162 162
163 spin_lock_bh(&dev->xmit_lock); 163 netif_tx_lock_bh(dev);
164 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { 164 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
165 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 && 165 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
166 dmi->dmi_addrlen == alen) { 166 dmi->dmi_addrlen == alen) {
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
176 } 176 }
177 177
178 if ((dmi = dmi1) == NULL) { 178 if ((dmi = dmi1) == NULL) {
179 spin_unlock_bh(&dev->xmit_lock); 179 netif_tx_unlock_bh(dev);
180 return -ENOMEM; 180 return -ENOMEM;
181 } 181 }
182 memcpy(dmi->dmi_addr, addr, alen); 182 memcpy(dmi->dmi_addr, addr, alen);
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
189 189
190 __dev_mc_upload(dev); 190 __dev_mc_upload(dev);
191 191
192 spin_unlock_bh(&dev->xmit_lock); 192 netif_tx_unlock_bh(dev);
193 return 0; 193 return 0;
194 194
195done: 195done:
196 spin_unlock_bh(&dev->xmit_lock); 196 netif_tx_unlock_bh(dev);
197 kfree(dmi1); 197 kfree(dmi1);
198 return err; 198 return err;
199} 199}
@@ -204,7 +204,7 @@ done:
204 204
205void dev_mc_discard(struct net_device *dev) 205void dev_mc_discard(struct net_device *dev)
206{ 206{
207 spin_lock_bh(&dev->xmit_lock); 207 netif_tx_lock_bh(dev);
208 208
209 while (dev->mc_list != NULL) { 209 while (dev->mc_list != NULL) {
210 struct dev_mc_list *tmp = dev->mc_list; 210 struct dev_mc_list *tmp = dev->mc_list;
@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
215 } 215 }
216 dev->mc_count = 0; 216 dev->mc_count = 0;
217 217
218 spin_unlock_bh(&dev->xmit_lock); 218 netif_tx_unlock_bh(dev);
219} 219}
220 220
221#ifdef CONFIG_PROC_FS 221#ifdef CONFIG_PROC_FS
@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
250 struct dev_mc_list *m; 250 struct dev_mc_list *m;
251 struct net_device *dev = v; 251 struct net_device *dev = v;
252 252
253 spin_lock_bh(&dev->xmit_lock); 253 netif_tx_lock_bh(dev);
254 for (m = dev->mc_list; m; m = m->next) { 254 for (m = dev->mc_list; m; m = m->next) {
255 int i; 255 int i;
256 256
@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
262 262
263 seq_putc(seq, '\n'); 263 seq_putc(seq, '\n');
264 } 264 }
265 spin_unlock_bh(&dev->xmit_lock); 265 netif_tx_unlock_bh(dev);
266 return 0; 266 return 0;
267} 267}
268 268
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05cebd95a..9cb781830380 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
273 273
274 do { 274 do {
275 npinfo->tries--; 275 npinfo->tries--;
276 spin_lock(&np->dev->xmit_lock); 276 netif_tx_lock(np->dev);
277 np->dev->xmit_lock_owner = smp_processor_id();
278 277
279 /* 278 /*
280 * network drivers do not expect to be called if the queue is 279 * network drivers do not expect to be called if the queue is
281 * stopped. 280 * stopped.
282 */ 281 */
283 if (netif_queue_stopped(np->dev)) { 282 if (netif_queue_stopped(np->dev)) {
284 np->dev->xmit_lock_owner = -1; 283 netif_tx_unlock(np->dev);
285 spin_unlock(&np->dev->xmit_lock);
286 netpoll_poll(np); 284 netpoll_poll(np);
287 udelay(50); 285 udelay(50);
288 continue; 286 continue;
289 } 287 }
290 288
291 status = np->dev->hard_start_xmit(skb, np->dev); 289 status = np->dev->hard_start_xmit(skb, np->dev);
292 np->dev->xmit_lock_owner = -1; 290 netif_tx_unlock(np->dev);
293 spin_unlock(&np->dev->xmit_lock);
294 291
295 /* success */ 292 /* success */
296 if(!status) { 293 if(!status) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c23e9c06ee23..67ed14ddabd2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2897,7 +2897,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
2897 } 2897 }
2898 } 2898 }
2899 2899
2900 spin_lock_bh(&odev->xmit_lock); 2900 netif_tx_lock_bh(odev);
2901 if (!netif_queue_stopped(odev)) { 2901 if (!netif_queue_stopped(odev)) {
2902 2902
2903 atomic_inc(&(pkt_dev->skb->users)); 2903 atomic_inc(&(pkt_dev->skb->users));
@@ -2942,7 +2942,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
2942 pkt_dev->next_tx_ns = 0; 2942 pkt_dev->next_tx_ns = 0;
2943 } 2943 }
2944 2944
2945 spin_unlock_bh(&odev->xmit_lock); 2945 netif_tx_unlock_bh(odev);
2946 2946
2947 /* If pkt_dev->count is zero, then run forever */ 2947 /* If pkt_dev->count is zero, then run forever */
2948 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 2948 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {