diff options
Diffstat (limited to 'net/sched/sch_generic.c')
| -rw-r--r-- | net/sched/sch_generic.c | 28 |
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 138ea92ed268..b1e4c5e20ac7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
| @@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device *dev) | |||
| 72 | dev->queue_lock serializes queue accesses for this device | 72 | dev->queue_lock serializes queue accesses for this device |
| 73 | AND dev->qdisc pointer itself. | 73 | AND dev->qdisc pointer itself. |
| 74 | 74 | ||
| 75 | dev->xmit_lock serializes accesses to device driver. | 75 | netif_tx_lock serializes accesses to device driver. |
| 76 | 76 | ||
| 77 | dev->queue_lock and dev->xmit_lock are mutually exclusive, | 77 | dev->queue_lock and netif_tx_lock are mutually exclusive, |
| 78 | if one is grabbed, another must be free. | 78 | if one is grabbed, another must be free. |
| 79 | */ | 79 | */ |
| 80 | 80 | ||
| @@ -108,7 +108,7 @@ int qdisc_restart(struct net_device *dev) | |||
| 108 | * will be requeued. | 108 | * will be requeued. |
| 109 | */ | 109 | */ |
| 110 | if (!nolock) { | 110 | if (!nolock) { |
| 111 | if (!spin_trylock(&dev->xmit_lock)) { | 111 | if (!netif_tx_trylock(dev)) { |
| 112 | collision: | 112 | collision: |
| 113 | /* So, someone grabbed the driver. */ | 113 | /* So, someone grabbed the driver. */ |
| 114 | 114 | ||
| @@ -126,8 +126,6 @@ int qdisc_restart(struct net_device *dev) | |||
| 126 | __get_cpu_var(netdev_rx_stat).cpu_collision++; | 126 | __get_cpu_var(netdev_rx_stat).cpu_collision++; |
| 127 | goto requeue; | 127 | goto requeue; |
| 128 | } | 128 | } |
| 129 | /* Remember that the driver is grabbed by us. */ | ||
| 130 | dev->xmit_lock_owner = smp_processor_id(); | ||
| 131 | } | 129 | } |
| 132 | 130 | ||
| 133 | { | 131 | { |
| @@ -142,8 +140,7 @@ int qdisc_restart(struct net_device *dev) | |||
| 142 | ret = dev->hard_start_xmit(skb, dev); | 140 | ret = dev->hard_start_xmit(skb, dev); |
| 143 | if (ret == NETDEV_TX_OK) { | 141 | if (ret == NETDEV_TX_OK) { |
| 144 | if (!nolock) { | 142 | if (!nolock) { |
| 145 | dev->xmit_lock_owner = -1; | 143 | netif_tx_unlock(dev); |
| 146 | spin_unlock(&dev->xmit_lock); | ||
| 147 | } | 144 | } |
| 148 | spin_lock(&dev->queue_lock); | 145 | spin_lock(&dev->queue_lock); |
| 149 | return -1; | 146 | return -1; |
| @@ -157,8 +154,7 @@ int qdisc_restart(struct net_device *dev) | |||
| 157 | /* NETDEV_TX_BUSY - we need to requeue */ | 154 | /* NETDEV_TX_BUSY - we need to requeue */ |
| 158 | /* Release the driver */ | 155 | /* Release the driver */ |
| 159 | if (!nolock) { | 156 | if (!nolock) { |
| 160 | dev->xmit_lock_owner = -1; | 157 | netif_tx_unlock(dev); |
| 161 | spin_unlock(&dev->xmit_lock); | ||
| 162 | } | 158 | } |
| 163 | spin_lock(&dev->queue_lock); | 159 | spin_lock(&dev->queue_lock); |
| 164 | q = dev->qdisc; | 160 | q = dev->qdisc; |
| @@ -187,7 +183,7 @@ static void dev_watchdog(unsigned long arg) | |||
| 187 | { | 183 | { |
| 188 | struct net_device *dev = (struct net_device *)arg; | 184 | struct net_device *dev = (struct net_device *)arg; |
| 189 | 185 | ||
| 190 | spin_lock(&dev->xmit_lock); | 186 | netif_tx_lock(dev); |
| 191 | if (dev->qdisc != &noop_qdisc) { | 187 | if (dev->qdisc != &noop_qdisc) { |
| 192 | if (netif_device_present(dev) && | 188 | if (netif_device_present(dev) && |
| 193 | netif_running(dev) && | 189 | netif_running(dev) && |
| @@ -203,7 +199,7 @@ static void dev_watchdog(unsigned long arg) | |||
| 203 | dev_hold(dev); | 199 | dev_hold(dev); |
| 204 | } | 200 | } |
| 205 | } | 201 | } |
| 206 | spin_unlock(&dev->xmit_lock); | 202 | netif_tx_unlock(dev); |
| 207 | 203 | ||
| 208 | dev_put(dev); | 204 | dev_put(dev); |
| 209 | } | 205 | } |
| @@ -227,17 +223,17 @@ void __netdev_watchdog_up(struct net_device *dev) | |||
| 227 | 223 | ||
| 228 | static void dev_watchdog_up(struct net_device *dev) | 224 | static void dev_watchdog_up(struct net_device *dev) |
| 229 | { | 225 | { |
| 230 | spin_lock_bh(&dev->xmit_lock); | 226 | netif_tx_lock_bh(dev); |
| 231 | __netdev_watchdog_up(dev); | 227 | __netdev_watchdog_up(dev); |
| 232 | spin_unlock_bh(&dev->xmit_lock); | 228 | netif_tx_unlock_bh(dev); |
| 233 | } | 229 | } |
| 234 | 230 | ||
| 235 | static void dev_watchdog_down(struct net_device *dev) | 231 | static void dev_watchdog_down(struct net_device *dev) |
| 236 | { | 232 | { |
| 237 | spin_lock_bh(&dev->xmit_lock); | 233 | netif_tx_lock_bh(dev); |
| 238 | if (del_timer(&dev->watchdog_timer)) | 234 | if (del_timer(&dev->watchdog_timer)) |
| 239 | dev_put(dev); | 235 | dev_put(dev); |
| 240 | spin_unlock_bh(&dev->xmit_lock); | 236 | netif_tx_unlock_bh(dev); |
| 241 | } | 237 | } |
| 242 | 238 | ||
| 243 | void netif_carrier_on(struct net_device *dev) | 239 | void netif_carrier_on(struct net_device *dev) |
| @@ -582,7 +578,7 @@ void dev_deactivate(struct net_device *dev) | |||
| 582 | while (test_bit(__LINK_STATE_SCHED, &dev->state)) | 578 | while (test_bit(__LINK_STATE_SCHED, &dev->state)) |
| 583 | yield(); | 579 | yield(); |
| 584 | 580 | ||
| 585 | spin_unlock_wait(&dev->xmit_lock); | 581 | spin_unlock_wait(&dev->_xmit_lock); |
| 586 | } | 582 | } |
| 587 | 583 | ||
| 588 | void dev_init_scheduler(struct net_device *dev) | 584 | void dev_init_scheduler(struct net_device *dev) |
