Diffstat (limited to 'net')
-rw-r--r-- | net/atm/clip.c          |  4
-rw-r--r-- | net/core/dev.c          | 12
-rw-r--r-- | net/core/dev_mcast.c    | 28
-rw-r--r-- | net/core/netpoll.c      |  9
-rw-r--r-- | net/core/pktgen.c       |  4
-rw-r--r-- | net/sched/sch_generic.c | 28
-rw-r--r-- | net/sched/sch_teql.c    |  9
7 files changed, 41 insertions(+), 53 deletions(-)
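Every hunk below replaces an open-coded spin_lock(&dev->xmit_lock) / xmit_lock_owner pair with a netif_tx_lock() helper. The helpers themselves live in include/linux/netdevice.h and are not shown in this diff; as a rough sketch inferred from the call sites (the authoritative definitions may differ in detail), they bundle the spinlock with the owner bookkeeping:

/* Sketch of the helper family, inferred from the call sites below;
 * see include/linux/netdevice.h for the real definitions. */
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (ok)
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}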
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 72d852982664..f92f9c94d2c7 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -98,7 +98,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
 		return;
 	}
-	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
+	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
 	entry->neigh->used = jiffies;
 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
 		if (*walk == clip_vcc) {
@@ -122,7 +122,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
 	       "0x%p)\n", entry, clip_vcc);
   out:
-	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
+	netif_tx_unlock_bh(entry->neigh->dev);
 }
 
 /* The neighbour entry n->lock is held. */
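Note the _bh flavour here: clip_start_xmit() runs from softirq context, so the tx lock must be taken with bottom halves disabled to actually exclude it on the local CPU. A minimal sketch of the pattern (the function name is illustrative, not from the patch):

/* Any code that mutates state the softirq xmit path reads takes the
 * tx lock with BHs off, which both serializes against that path and
 * suspends it locally for the critical section. */
static void update_under_tx_lock(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* blocks clip_start_xmit() */
	/* ... modify entry->vccs and friends here ... */
	netif_tx_unlock_bh(dev);
}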
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bfa78c66c25..1b09f1cae46e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1282,15 +1282,13 @@ int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 
 #define HARD_TX_LOCK(dev, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		spin_lock(&dev->xmit_lock);		\
-		dev->xmit_lock_owner = cpu;		\
+		netif_tx_lock(dev);			\
 	}						\
 }
 
 #define HARD_TX_UNLOCK(dev) {				\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		dev->xmit_lock_owner = -1;		\
-		spin_unlock(&dev->xmit_lock);		\
+		netif_tx_unlock(dev);			\
 	}						\
 }
 
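For drivers that set NETIF_F_LLTX these macros stay no-ops, since such drivers do their own TX locking. A condensed, illustrative view of how dev_queue_xmit() uses the pair (abridged sketch with a hypothetical function name; queueing and error handling omitted):

/* Abridged shape of the dev_queue_xmit() fast path, illustration only: */
static int xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NETDEV_TX_BUSY;
	int cpu = smp_processor_id();

	HARD_TX_LOCK(dev, cpu);		/* no-op for NETIF_F_LLTX drivers */
	if (!netif_queue_stopped(dev))
		rc = dev->hard_start_xmit(skb, dev);
	HARD_TX_UNLOCK(dev);
	return rc;
}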
@@ -1389,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	/* The device has no queue. Common case for software devices:
 	   loopback, all the sorts of tunnels...
 
-	   Really, it is unlikely that xmit_lock protection is necessary here.
-	   (f.e. loopback and IP tunnels are clean ignoring statistics
+	   Really, it is unlikely that netif_tx_lock protection is necessary
+	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
 	   counters.)
 	   However, it is possible, that they rely on protection
 	   made by us here.
@@ -2805,7 +2803,7 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 
 	spin_lock_init(&dev->queue_lock);
-	spin_lock_init(&dev->xmit_lock);
+	spin_lock_init(&dev->_xmit_lock);
 	dev->xmit_lock_owner = -1;
 #ifdef CONFIG_NET_CLS_ACT
 	spin_lock_init(&dev->ingress_lock);
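The rename to _xmit_lock looks deliberate: the leading underscore marks the field as private to the netif_tx_*() helpers, leaving initialization in register_netdevice() as the only open-coded use in this file. Roughly, the relevant struct net_device fields (sketch, not the full struct):

struct net_device {
	/* ... many unrelated fields ... */
	spinlock_t	_xmit_lock;	 /* was xmit_lock; take via netif_tx_*() */
	int		xmit_lock_owner; /* CPU currently in the driver, or -1 */
	/* ... */
};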
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 05d60850840e..c57d887da2ef 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -62,7 +62,7 @@
  *	Device mc lists are changed by bh at least if IPv6 is enabled,
  *	so that it must be bh protected.
  *
- *	We block accesses to device mc filters with dev->xmit_lock.
+ *	We block accesses to device mc filters with netif_tx_lock.
  */
 
 /*
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
 
 void dev_mc_upload(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	__dev_mc_upload(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /*
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
 	int err = 0;
 	struct dev_mc_list *dmi, **dmip;
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
 		/*
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
 			 */
 			__dev_mc_upload(dev);
 
-			spin_unlock_bh(&dev->xmit_lock);
+			netif_tx_unlock_bh(dev);
 			return 0;
 		}
 	}
 	err = -ENOENT;
 done:
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return err;
 }
 
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
 		    dmi->dmi_addrlen == alen) {
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 	}
 
 	if ((dmi = dmi1) == NULL) {
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 		return -ENOMEM;
 	}
 	memcpy(dmi->dmi_addr, addr, alen);
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
 	__dev_mc_upload(dev);
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return 0;
 
 done:
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	kfree(dmi1);
 	return err;
 }
@@ -204,7 +204,7 @@ done:
 
 void dev_mc_discard(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	while (dev->mc_list != NULL) {
 		struct dev_mc_list *tmp = dev->mc_list;
@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
 	}
 	dev->mc_count = 0;
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 	struct dev_mc_list *m;
 	struct net_device *dev = v;
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	for (m = dev->mc_list; m; m = m->next) {
 		int i;
 
@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 
 		seq_putc(seq, '\n');
 	}
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return 0;
 }
 
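Since the mc-list walk, add, and delete paths all take netif_tx_lock_bh() internally, callers must not already hold the tx lock. A hypothetical caller, for illustration only:

/* Hypothetical usage (function name is illustrative): join an Ethernet
 * multicast group; all locking is handled inside dev_mc_add(). */
static int join_mc_group(struct net_device *dev, unsigned char *mcaddr)
{
	return dev_mc_add(dev, mcaddr, ETH_ALEN, 0);	/* glbl == 0 */
}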
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05cebd95a..9cb781830380 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	do {
 		npinfo->tries--;
-		spin_lock(&np->dev->xmit_lock);
-		np->dev->xmit_lock_owner = smp_processor_id();
+		netif_tx_lock(np->dev);
 
 		/*
 		 * network drivers do not expect to be called if the queue is
 		 * stopped.
 		 */
 		if (netif_queue_stopped(np->dev)) {
-			np->dev->xmit_lock_owner = -1;
-			spin_unlock(&np->dev->xmit_lock);
+			netif_tx_unlock(np->dev);
 			netpoll_poll(np);
 			udelay(50);
 			continue;
 		}
 
 		status = np->dev->hard_start_xmit(skb, np->dev);
-		np->dev->xmit_lock_owner = -1;
-		spin_unlock(&np->dev->xmit_lock);
+		netif_tx_unlock(np->dev);
 
 		/* success */
 		if(!status) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c23e9c06ee23..67ed14ddabd2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2897,7 +2897,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	spin_lock_bh(&odev->xmit_lock);
+	netif_tx_lock_bh(odev);
 	if (!netif_queue_stopped(odev)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
@@ -2942,7 +2942,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->next_tx_ns = 0;
 	}
 
-	spin_unlock_bh(&odev->xmit_lock);
+	netif_tx_unlock_bh(odev);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 138ea92ed268..b1e4c5e20ac7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device *dev)
    dev->queue_lock serializes queue accesses for this device
    AND dev->qdisc pointer itself.
 
-   dev->xmit_lock serializes accesses to device driver.
+   netif_tx_lock serializes accesses to device driver.
 
-   dev->queue_lock and dev->xmit_lock are mutually exclusive,
+   dev->queue_lock and netif_tx_lock are mutually exclusive,
    if one is grabbed, another must be free.
  */
 
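The mutual exclusion this comment describes shows up concretely in qdisc_restart(): the queue lock is dropped before the driver is entered and re-taken afterwards. A simplified sketch of that hand-off (hypothetical function name; collision handling, LLTX, and requeue omitted):

/* Simplified shape of the qdisc_restart() hand-off, illustration only: */
static int xmit_with_handoff(struct sk_buff *skb, struct net_device *dev)
{
	int ret = NETDEV_TX_BUSY;

	spin_unlock(&dev->queue_lock);	/* never hold both locks at once */
	if (netif_tx_trylock(dev)) {
		ret = dev->hard_start_xmit(skb, dev);
		netif_tx_unlock(dev);
	}
	spin_lock(&dev->queue_lock);	/* re-take for the qdisc bookkeeping */
	return ret;
}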
@@ -108,7 +108,7 @@ int qdisc_restart(struct net_device *dev)
 		 * will be requeued.
 		 */
 		if (!nolock) {
-			if (!spin_trylock(&dev->xmit_lock)) {
+			if (!netif_tx_trylock(dev)) {
 			collision:
 				/* So, someone grabbed the driver. */
 
@@ -126,8 +126,6 @@ int qdisc_restart(struct net_device *dev)
 				__get_cpu_var(netdev_rx_stat).cpu_collision++;
 				goto requeue;
 			}
-			/* Remember that the driver is grabbed by us. */
-			dev->xmit_lock_owner = smp_processor_id();
 		}
 
 		{
@@ -142,8 +140,7 @@ int qdisc_restart(struct net_device *dev)
 				ret = dev->hard_start_xmit(skb, dev);
 				if (ret == NETDEV_TX_OK) {
 					if (!nolock) {
-						dev->xmit_lock_owner = -1;
-						spin_unlock(&dev->xmit_lock);
+						netif_tx_unlock(dev);
 					}
 					spin_lock(&dev->queue_lock);
 					return -1;
@@ -157,8 +154,7 @@ int qdisc_restart(struct net_device *dev)
 		/* NETDEV_TX_BUSY - we need to requeue */
 		/* Release the driver */
 		if (!nolock) {
-			dev->xmit_lock_owner = -1;
-			spin_unlock(&dev->xmit_lock);
+			netif_tx_unlock(dev);
 		}
 		spin_lock(&dev->queue_lock);
 		q = dev->qdisc;
@@ -187,7 +183,7 @@ static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
 
-	spin_lock(&dev->xmit_lock);
+	netif_tx_lock(dev);
 	if (dev->qdisc != &noop_qdisc) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
@@ -203,7 +199,7 @@ static void dev_watchdog(unsigned long arg)
 			dev_hold(dev);
 		}
 	}
-	spin_unlock(&dev->xmit_lock);
+	netif_tx_unlock(dev);
 
 	dev_put(dev);
 }
@@ -227,17 +223,17 @@ void __netdev_watchdog_up(struct net_device *dev)
 
 static void dev_watchdog_up(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	__netdev_watchdog_up(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 static void dev_watchdog_down(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	if (del_timer(&dev->watchdog_timer))
 		dev_put(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 void netif_carrier_on(struct net_device *dev)
@@ -582,7 +578,7 @@ void dev_deactivate(struct net_device *dev)
 	while (test_bit(__LINK_STATE_SCHED, &dev->state))
 		yield();
 
-	spin_unlock_wait(&dev->xmit_lock);
+	spin_unlock_wait(&dev->_xmit_lock);
 }
 
 void dev_init_scheduler(struct net_device *dev)
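dev_deactivate() remains a direct reader of _xmit_lock: there is no helper for spin_unlock_wait(), which merely spins until any in-flight transmitter drops the lock. If one wanted to keep the field fully private, a wrapper along these lines would do (hypothetical; not part of this patch):

/* Hypothetical wrapper, not introduced by this patch: */
static inline void netif_tx_lock_wait(struct net_device *dev)
{
	spin_unlock_wait(&dev->_xmit_lock);	/* wait for any holder to exit */
}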
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 79b8ef34c6e4..4c16ad57a3e4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -302,20 +302,17 @@ restart:
 
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
-			if (spin_trylock(&slave->xmit_lock)) {
-				slave->xmit_lock_owner = smp_processor_id();
+			if (netif_tx_trylock(slave)) {
 				if (!netif_queue_stopped(slave) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
-					slave->xmit_lock_owner = -1;
-					spin_unlock(&slave->xmit_lock);
+					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
 					master->stats.tx_bytes += len;
 					return 0;
 				}
-				slave->xmit_lock_owner = -1;
-				spin_unlock(&slave->xmit_lock);
+				netif_tx_unlock(slave);
 			}
 			if (netif_queue_stopped(dev))
 				busy = 1;
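The teql path shows the trylock idiom in full: skip a busy slave rather than spin on it. Distilled into a standalone sketch (hypothetical function name, for illustration):

/* Try to transmit without blocking; the caller treats NETDEV_TX_BUSY
 * as "slave busy, move on to the next one" rather than spinning. */
static int teql_try_xmit(struct sk_buff *skb, struct net_device *slave)
{
	int ret = NETDEV_TX_BUSY;

	if (netif_tx_trylock(slave)) {
		if (!netif_queue_stopped(slave))
			ret = slave->hard_start_xmit(skb, slave);
		netif_tx_unlock(slave);
	}
	return ret;
}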