-rw-r--r--  Documentation/networking/netdevices.txt         |  8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  6
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_net.c            |  4
-rw-r--r--  drivers/net/bnx2.c                               |  4
-rw-r--r--  drivers/net/bonding/bond_main.c                  |  2
-rw-r--r--  drivers/net/forcedeth.c                          | 18
-rw-r--r--  drivers/net/hamradio/6pack.c                     |  8
-rw-r--r--  drivers/net/hamradio/mkiss.c                     |  8
-rw-r--r--  drivers/net/ifb.c                                | 10
-rw-r--r--  drivers/net/irda/vlsi_ir.c                       |  2
-rw-r--r--  drivers/net/natsemi.c                            |  4
-rw-r--r--  drivers/net/tulip/winbond-840.c                  |  9
-rw-r--r--  drivers/net/wireless/orinoco.c                   |  4
-rw-r--r--  include/linux/netdevice.h                        | 38
-rw-r--r--  net/atm/clip.c                                   |  4
-rw-r--r--  net/core/dev.c                                   | 12
-rw-r--r--  net/core/dev_mcast.c                             | 28
-rw-r--r--  net/core/netpoll.c                               |  9
-rw-r--r--  net/core/pktgen.c                                |  4
-rw-r--r--  net/sched/sch_generic.c                          | 28
-rw-r--r--  net/sched/sch_teql.c                             |  9
21 files changed, 121 insertions, 98 deletions
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 3c0a5ba614d7..847cedb238f6 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -42,9 +42,9 @@ dev->get_stats:
 	Context: nominally process, but don't sleep inside an rwlock
 
 dev->hard_start_xmit:
-	Synchronization: dev->xmit_lock spinlock.
+	Synchronization: netif_tx_lock spinlock.
 	When the driver sets NETIF_F_LLTX in dev->features this will be
-	called without holding xmit_lock. In this case the driver
+	called without holding netif_tx_lock. In this case the driver
 	has to lock by itself when needed. It is recommended to use a try lock
 	for this and return -1 when the spin lock fails.
 	The locking there should also properly protect against
@@ -62,12 +62,12 @@ dev->hard_start_xmit:
 	Only valid when NETIF_F_LLTX is set.
 
 dev->tx_timeout:
-	Synchronization: dev->xmit_lock spinlock.
+	Synchronization: netif_tx_lock spinlock.
 	Context: BHs disabled
 	Notes: netif_queue_stopped() is guaranteed true
 
 dev->set_multicast_list:
-	Synchronization: dev->xmit_lock spinlock.
+	Synchronization: netif_tx_lock spinlock.
 	Context: BHs disabled
 
 dev->poll:
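
The try-lock rule above is the whole contract for NETIF_F_LLTX drivers: the core no longer takes netif_tx_lock for them, so the driver supplies its own lock and bails out on contention. A minimal sketch, assuming a hypothetical driver with a private priv->tx_lock (the foo_* names are illustrative, not from this patch; NETDEV_TX_LOCKED is the -1 the text refers to):

	static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* LLTX: the core did not take netif_tx_lock for us */
		if (!spin_trylock(&priv->tx_lock))
			return NETDEV_TX_LOCKED;	/* -1: core will retry */

		/* ... hand the skb to the hardware ... */

		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}
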
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1dae4b238252..1d917edcf9ba 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
 	ipoib_mcast_stop_thread(dev, 0);
 
-	spin_lock_irqsave(&dev->xmit_lock, flags);
+	local_irq_save(flags);
+	netif_tx_lock(dev);
 	spin_lock(&priv->lock);
 
 	/*
@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 	}
 
 	spin_unlock(&priv->lock);
-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
+	netif_tx_unlock(dev);
+	local_irq_restore(flags);
 
 	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
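
Note the shape of the ipoib conversion: spin_lock_irqsave() on the old xmit_lock has no one-call netif_tx_lock_irqsave() equivalent, so a caller that must keep interrupts disabled (priv->lock is presumably also taken from interrupt context here) open-codes the split. The resulting pattern, as a sketch:

	unsigned long flags;

	local_irq_save(flags);		/* no netif_tx_lock_irqsave() helper */
	netif_tx_lock(dev);
	spin_lock(&priv->lock);
	/* ... rebuild the multicast state ... */
	spin_unlock(&priv->lock);
	netif_tx_unlock(dev);
	local_irq_restore(flags);
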
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 2f0f35811bf7..9fd87521a163 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void *data)
 
 	dvb_net_feed_stop(dev);
 	priv->rx_mode = RX_MODE_UNI;
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	if (dev->flags & IFF_PROMISC) {
 		dprintk("%s: promiscuous mode\n", dev->name);
@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void *data)
 		}
 	}
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	dvb_net_feed_start(dev);
 }
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 54161aef3cac..9c5a8842ed0f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2009,7 +2009,7 @@ bnx2_poll(struct net_device *dev, int *budget)
 	return 1;
 }
 
-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  * from set_multicast.
  */
 static void
@@ -4252,7 +4252,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 }
 #endif
 
-/* Called with dev->xmit_lock.
+/* Called with netif_tx_lock.
  * hard_start_xmit is pseudo-lockless - a lock is only required when
  * the tx queue is full. This way, we get the benefit of lockless
  * operations most of the time without the complexities to handle
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55d236726d11..46326cdfb277 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4191,7 +4191,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
 	 */
 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 
-	/* don't acquire bond device's xmit_lock when
+	/* don't acquire bond device's netif_tx_lock when
 	 * transmitting */
 	bond_dev->features |= NETIF_F_LLTX;
 
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index feb5b223cd60..5a8651b4b01d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -533,9 +533,9 @@ typedef union _ring_type {
  * critical parts:
  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  *   by the arch code for interrupts.
- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *   needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  */
 
 /* in dev: base, irq */
@@ -1213,7 +1213,7 @@ static void drain_ring(struct net_device *dev)
 
 /*
  * nv_start_xmit: dev->hard_start_xmit function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -1407,7 +1407,7 @@ static void nv_tx_done(struct net_device *dev)
 
 /*
  * nv_tx_timeout: dev->tx_timeout function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_tx_timeout(struct net_device *dev)
 {
@@ -1737,7 +1737,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
 	nv_disable_irq(dev);
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	spin_lock(&np->lock);
 	/* stop engines */
 	nv_stop_rx(dev);
@@ -1768,7 +1768,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_rx(dev);
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
 	}
 	return 0;
@@ -1803,7 +1803,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
 
 	if (netif_running(dev)) {
-		spin_lock_bh(&dev->xmit_lock);
+		netif_tx_lock_bh(dev);
 		spin_lock_irq(&np->lock);
 
 		/* stop rx engine */
@@ -1815,7 +1815,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 		/* restart rx engine */
 		nv_start_rx(dev);
 		spin_unlock_irq(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 	} else {
 		nv_copy_mac_to_hw(dev);
 	}
@@ -1824,7 +1824,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 
 /*
  * nv_set_multicast: dev->set_multicast function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_set_multicast(struct net_device *dev)
 {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 102c1f0b90da..d12605f0ac7c 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct sockaddr_ax25 *sa = addr;
 
-	spin_lock_irq(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-	spin_unlock_irq(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 
 	return 0;
 }
@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
 		break;
 	}
 
-	spin_lock_irq(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
-	spin_unlock_irq(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 
 	err = 0;
 	break;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index d81a8e1eeb8d..3ebbbe56b6e9 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct sockaddr_ax25 *sa = addr;
 
-	spin_lock_irq(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-	spin_unlock_irq(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 
 	return 0;
 }
@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
 		break;
 	}
 
-	spin_lock_irq(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
-	spin_unlock_irq(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 
 	err = 0;
 	break;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 31fb2d75dc44..2e222ef91e22 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev)
 	dp->st_task_enter++;
 	if ((skb = skb_peek(&dp->tq)) == NULL) {
 		dp->st_txq_refl_try++;
-		if (spin_trylock(&_dev->xmit_lock)) {
+		if (netif_tx_trylock(_dev)) {
 			dp->st_rxq_enter++;
 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
 				skb_queue_tail(&dp->tq, skb);
 				dp->st_rx2tx_tran++;
 			}
-			spin_unlock(&_dev->xmit_lock);
+			netif_tx_unlock(_dev);
 		} else {
 			/* reschedule */
 			dp->st_rxq_notenter++;
@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev)
 		}
 	}
 
-	if (spin_trylock(&_dev->xmit_lock)) {
+	if (netif_tx_trylock(_dev)) {
 		dp->st_rxq_check++;
 		if ((skb = skb_peek(&dp->rq)) == NULL) {
 			dp->tasklet_pending = 0;
@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev)
 			netif_wake_queue(_dev);
 		} else {
 			dp->st_rxq_rsch++;
-			spin_unlock(&_dev->xmit_lock);
+			netif_tx_unlock(_dev);
 			goto resched;
 		}
-		spin_unlock(&_dev->xmit_lock);
+		netif_tx_unlock(_dev);
 	} else {
 resched:
 		dp->tasklet_pending = 1;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 97a49e0be76b..d70b9e8d6e60 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			    || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
 				break;
 			udelay(100);
-			/* must not sleep here - we are called under xmit_lock! */
+			/* must not sleep here - called under netif_tx_lock! */
 		}
 	}
 
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 90627756d6fa..2e4ecedba057 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -318,12 +318,12 @@ performance critical codepaths:
 The rx process only runs in the interrupt handler. Access from outside
 the interrupt handler is only permitted after disable_irq().
 
-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
 is set, then access is permitted under spin_lock_irq(&np->lock).
 
 Thus configuration functions that want to access everything must call
 	disable_irq(dev->irq);
-	spin_lock_bh(dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	spin_lock_irq(&np->lock);
 
 IV. Notes
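
Spelled out, the full-exclusion sequence the natsemi comment prescribes looks like this (a sketch of the documented rule, not code added by this patch):

	disable_irq(dev->irq);		/* no rx/tx reaping from the irq handler */
	netif_tx_lock_bh(dev);		/* no hard_start_xmit, no set_multicast  */
	spin_lock_irq(&np->lock);

	/* ... reconfigure the chip ... */

	spin_unlock_irq(&np->lock);
	netif_tx_unlock_bh(dev);
	enable_irq(dev->irq);
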
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 136a70c4d5e4..56d86c7c0386 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (struct pci_dev *pdev)
  * - get_stats:
  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
  * - hard_start_xmit:
- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
+ * 	synchronize_irq + netif_tx_disable;
  * - tx_timeout:
- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ * 	netif_device_detach + netif_tx_disable;
  * - set_multicast_list
- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ * 	netif_device_detach + netif_tx_disable;
  * - interrupt handler
  * 	doesn't touch hw if not present, synchronize_irq waits for
  * 	running instances of the interrupt handler.
@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
 	netif_device_detach(dev);
 	update_csr6(dev, 0);
 	iowrite32(0, ioaddr + IntrEnable);
-	netif_stop_queue(dev);
 	spin_unlock_irq(&np->lock);
 
-	spin_unlock_wait(&dev->xmit_lock);
 	synchronize_irq(dev->irq);
+	netif_tx_disable(dev);
 
 	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
 
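
The suspend path can drop its open-coded netif_stop_queue() + spin_unlock_wait() pair because netif_tx_disable() (added in the include/linux/netdevice.h hunk below) does the equivalent job: it stops the queue while holding the tx lock, so by the time it returns no CPU is still inside hard_start_xmit(). Its body is roughly:

	netif_tx_lock_bh(dev);	/* also waits out any in-flight hard_start_xmit() */
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
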
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index c2d0b09e0418..a5fcfcde63d1 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1833,7 +1833,9 @@ static int __orinoco_program_rids(struct net_device *dev)
 	/* Set promiscuity / multicast*/
 	priv->promiscuous = 0;
 	priv->mc_count = 0;
-	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+
+	/* FIXME: what about netif_tx_lock */
+	__orinoco_set_multicast_list(dev);
 
 	return 0;
 }
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b5760c67af9c..067b9ccafd87 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -407,7 +407,7 @@ struct net_device
 	 * One part is mostly used on xmit path (device)
 	 */
 	/* hard_start_xmit synchronizer */
-	spinlock_t	xmit_lock ____cacheline_aligned_in_smp;
+	spinlock_t	_xmit_lock ____cacheline_aligned_in_smp;
 	/* cpu id of processor entered to hard_start_xmit or -1,
 	   if nobody entered there.
 	 */
@@ -893,11 +893,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	int err = spin_trylock(&dev->_xmit_lock);
+	/* spin_trylock() returns nonzero on success, so record the
+	 * owner only when the lock was actually taken. */
+	if (err)
+		dev->xmit_lock_owner = smp_processor_id();
+	return err;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	netif_stop_queue(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
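
With the helpers in place, converting a caller is mechanical and the xmit_lock_owner bookkeeping moves out of the callers entirely. A sketch of the before/after for a typical bh-context user (dev_mc_upload() in the net/core/dev_mcast.c hunk below is a real instance):

	/* was: spin_lock_bh(&dev->xmit_lock); */
	netif_tx_lock_bh(dev);
	__dev_mc_upload(dev);		/* push the mc filter to the hardware */
	/* was: spin_unlock_bh(&dev->xmit_lock); */
	netif_tx_unlock_bh(dev);
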
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 72d852982664..f92f9c94d2c7 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -98,7 +98,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
 		return;
 	}
-	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
+	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
 	entry->neigh->used = jiffies;
 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
 		if (*walk == clip_vcc) {
@@ -122,7 +122,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
 	       "0x%p)\n", entry, clip_vcc);
 out:
-	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
+	netif_tx_unlock_bh(entry->neigh->dev);
 }
 
 /* The neighbour entry n->lock is held. */
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bfa78c66c25..1b09f1cae46e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1282,15 +1282,13 @@ int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 
 #define HARD_TX_LOCK(dev, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		spin_lock(&dev->xmit_lock);		\
-		dev->xmit_lock_owner = cpu;		\
+		netif_tx_lock(dev);			\
 	}						\
 }
 
 #define HARD_TX_UNLOCK(dev) {				\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		dev->xmit_lock_owner = -1;		\
-		spin_unlock(&dev->xmit_lock);		\
+		netif_tx_unlock(dev);			\
 	}						\
 }
 
@@ -1389,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	/* The device has no queue. Common case for software devices:
 	   loopback, all the sorts of tunnels...
 
-	   Really, it is unlikely that xmit_lock protection is necessary here.
-	   (f.e. loopback and IP tunnels are clean ignoring statistics
+	   Really, it is unlikely that netif_tx_lock protection is necessary
+	   here. (f.e. loopback and IP tunnels are clean ignoring statistics
 	   counters.)
 	   However, it is possible, that they rely on protection
 	   made by us here.
@@ -2805,7 +2803,7 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 
 	spin_lock_init(&dev->queue_lock);
-	spin_lock_init(&dev->xmit_lock);
+	spin_lock_init(&dev->_xmit_lock);
 	dev->xmit_lock_owner = -1;
 #ifdef CONFIG_NET_CLS_ACT
 	spin_lock_init(&dev->ingress_lock);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 05d60850840e..c57d887da2ef 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -62,7 +62,7 @@
  *	Device mc lists are changed by bh at least if IPv6 is enabled,
  *	so that it must be bh protected.
  *
- *	We block accesses to device mc filters with dev->xmit_lock.
+ *	We block accesses to device mc filters with netif_tx_lock.
  */
 
 /*
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
 
 void dev_mc_upload(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	__dev_mc_upload(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /*
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
 	int err = 0;
 	struct dev_mc_list *dmi, **dmip;
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
 		/*
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
 			 */
 			__dev_mc_upload(dev);
 
-			spin_unlock_bh(&dev->xmit_lock);
+			netif_tx_unlock_bh(dev);
 			return 0;
 		}
 	}
 	err = -ENOENT;
 done:
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return err;
 }
 
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
 		    dmi->dmi_addrlen == alen) {
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 	}
 
 	if ((dmi = dmi1) == NULL) {
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 		return -ENOMEM;
 	}
 	memcpy(dmi->dmi_addr, addr, alen);
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
 	__dev_mc_upload(dev);
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return 0;
 
 done:
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	kfree(dmi1);
 	return err;
 }
@@ -204,7 +204,7 @@ done:
 
 void dev_mc_discard(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	while (dev->mc_list != NULL) {
 		struct dev_mc_list *tmp = dev->mc_list;
@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
 	}
 	dev->mc_count = 0;
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 	struct dev_mc_list *m;
 	struct net_device *dev = v;
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	for (m = dev->mc_list; m; m = m->next) {
 		int i;
 
@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 
 		seq_putc(seq, '\n');
 	}
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return 0;
 }
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05cebd95a..9cb781830380 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	do {
 		npinfo->tries--;
-		spin_lock(&np->dev->xmit_lock);
-		np->dev->xmit_lock_owner = smp_processor_id();
+		netif_tx_lock(np->dev);
 
 		/*
 		 * network drivers do not expect to be called if the queue is
 		 * stopped.
 		 */
 		if (netif_queue_stopped(np->dev)) {
-			np->dev->xmit_lock_owner = -1;
-			spin_unlock(&np->dev->xmit_lock);
+			netif_tx_unlock(np->dev);
 			netpoll_poll(np);
 			udelay(50);
 			continue;
 		}
 
 		status = np->dev->hard_start_xmit(skb, np->dev);
-		np->dev->xmit_lock_owner = -1;
-		spin_unlock(&np->dev->xmit_lock);
+		netif_tx_unlock(np->dev);
 
 		/* success */
 		if(!status) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c23e9c06ee23..67ed14ddabd2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2897,7 +2897,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	spin_lock_bh(&odev->xmit_lock);
+	netif_tx_lock_bh(odev);
 	if (!netif_queue_stopped(odev)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
@@ -2942,7 +2942,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 			pkt_dev->next_tx_ns = 0;
 		}
 
-	spin_unlock_bh(&odev->xmit_lock);
+	netif_tx_unlock_bh(odev);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 138ea92ed268..b1e4c5e20ac7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device *dev)
    dev->queue_lock serializes queue accesses for this device
    AND dev->qdisc pointer itself.
 
-   dev->xmit_lock serializes accesses to device driver.
+   netif_tx_lock serializes accesses to device driver.
 
-   dev->queue_lock and dev->xmit_lock are mutually exclusive,
+   dev->queue_lock and netif_tx_lock are mutually exclusive,
    if one is grabbed, another must be free.
  */
 
@@ -108,7 +108,7 @@ int qdisc_restart(struct net_device *dev)
 	 * will be requeued.
 	 */
 	if (!nolock) {
-		if (!spin_trylock(&dev->xmit_lock)) {
+		if (!netif_tx_trylock(dev)) {
 		collision:
 			/* So, someone grabbed the driver. */
 
@@ -126,8 +126,6 @@ int qdisc_restart(struct net_device *dev)
 			__get_cpu_var(netdev_rx_stat).cpu_collision++;
 			goto requeue;
 		}
-		/* Remember that the driver is grabbed by us. */
-		dev->xmit_lock_owner = smp_processor_id();
 	}
 
 	{
@@ -142,8 +140,7 @@ int qdisc_restart(struct net_device *dev)
 		ret = dev->hard_start_xmit(skb, dev);
 		if (ret == NETDEV_TX_OK) {
 			if (!nolock) {
-				dev->xmit_lock_owner = -1;
-				spin_unlock(&dev->xmit_lock);
+				netif_tx_unlock(dev);
 			}
 			spin_lock(&dev->queue_lock);
 			return -1;
@@ -157,8 +154,7 @@ int qdisc_restart(struct net_device *dev)
 		/* NETDEV_TX_BUSY - we need to requeue */
 		/* Release the driver */
 		if (!nolock) {
-			dev->xmit_lock_owner = -1;
-			spin_unlock(&dev->xmit_lock);
+			netif_tx_unlock(dev);
 		}
 		spin_lock(&dev->queue_lock);
 		q = dev->qdisc;
@@ -187,7 +183,7 @@ static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
 
-	spin_lock(&dev->xmit_lock);
+	netif_tx_lock(dev);
 	if (dev->qdisc != &noop_qdisc) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
@@ -203,7 +199,7 @@ static void dev_watchdog(unsigned long arg)
 			dev_hold(dev);
 		}
 	}
-	spin_unlock(&dev->xmit_lock);
+	netif_tx_unlock(dev);
 
 	dev_put(dev);
 }
@@ -227,17 +223,17 @@ void __netdev_watchdog_up(struct net_device *dev)
 
 static void dev_watchdog_up(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	__netdev_watchdog_up(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 static void dev_watchdog_down(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	if (del_timer(&dev->watchdog_timer))
 		dev_put(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 void netif_carrier_on(struct net_device *dev)
@@ -582,7 +578,7 @@ void dev_deactivate(struct net_device *dev)
 	while (test_bit(__LINK_STATE_SCHED, &dev->state))
 		yield();
 
-	spin_unlock_wait(&dev->xmit_lock);
+	spin_unlock_wait(&dev->_xmit_lock);
 }
 
 void dev_init_scheduler(struct net_device *dev)
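
qdisc_restart() above is where the two locks meet, and the rule from the comment at the top of sch_generic.c (queue_lock and netif_tx_lock must not be waited on together) dictates its shape: the tx lock is only ever trylocked while queue_lock is held, and queue_lock is dropped before the driver runs. Roughly, simplified from the hunks above (error and requeue paths omitted):

	/* entered with dev->queue_lock held */
	skb = q->dequeue(q);

	if (netif_tx_trylock(dev)) {		/* trylock: queue_lock still held */
		spin_unlock(&dev->queue_lock);	/* drop it before the driver runs */
		ret = dev->hard_start_xmit(skb, dev);
		netif_tx_unlock(dev);
		spin_lock(&dev->queue_lock);	/* retaken before returning */
	}
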
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 79b8ef34c6e4..4c16ad57a3e4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -302,20 +302,17 @@ restart:
 
 	switch (teql_resolve(skb, skb_res, slave)) {
 	case 0:
-		if (spin_trylock(&slave->xmit_lock)) {
-			slave->xmit_lock_owner = smp_processor_id();
+		if (netif_tx_trylock(slave)) {
 			if (!netif_queue_stopped(slave) &&
 			    slave->hard_start_xmit(skb, slave) == 0) {
-				slave->xmit_lock_owner = -1;
-				spin_unlock(&slave->xmit_lock);
+				netif_tx_unlock(slave);
 				master->slaves = NEXT_SLAVE(q);
 				netif_wake_queue(dev);
 				master->stats.tx_packets++;
 				master->stats.tx_bytes += len;
 				return 0;
 			}
-			slave->xmit_lock_owner = -1;
-			spin_unlock(&slave->xmit_lock);
+			netif_tx_unlock(slave);
 		}
 		if (netif_queue_stopped(dev))
 			busy = 1;