Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 47 ++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index f51f940a077c..f15346b7b5ba 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2064,11 +2064,12 @@ gso:
 	   Either shot noqueue qdisc, it is even simpler 8)
 	 */
 	if (dev->flags & IFF_UP) {
-		int cpu = smp_processor_id(); /* ok because BHs are off */
-
-		if (txq->xmit_lock_owner != cpu) {
+		/*
+		 * No need to check for recursion with threaded interrupts:
+		 */
+		if (!netif_tx_lock_recursion(txq)) {
 
-			HARD_TX_LOCK(dev, txq, cpu);
+			HARD_TX_LOCK(dev, txq);
 
 			if (!netif_tx_queue_stopped(txq)) {
 				rc = dev_hard_start_xmit(skb, dev, txq);
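
The CPU-based recursion test is replaced by a netif_tx_lock_recursion() helper whose definition is not part of this file. A minimal sketch of what such a helper could look like, assuming xmit_lock_owner now holds a task pointer (consistent with the (void *)-1 initialisation in the final hunk below):

/* Hypothetical sketch - not the actual helper from the -rt tree. */
static inline int netif_tx_lock_recursion(struct netdev_queue *txq)
{
	/* Recursion means the current task already owns this queue's lock. */
	return txq->xmit_lock_owner == (void *)current;
}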
@@ -2173,8 +2174,8 @@ int netif_rx_ni(struct sk_buff *skb)
 {
 	int err;
 
-	preempt_disable();
 	err = netif_rx(skb);
+	preempt_disable();
 	if (local_softirq_pending())
 		do_softirq();
 	preempt_enable();
@@ -2185,7 +2186,8 @@ EXPORT_SYMBOL(netif_rx_ni);
 
 static void net_tx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = &per_cpu(softnet_data,
+					   raw_smp_processor_id());
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -2201,6 +2203,11 @@ static void net_tx_action(struct softirq_action *h)
 
 			WARN_ON(atomic_read(&skb->users));
 			__kfree_skb(skb);
+			/*
+			 * Safe to reschedule - the list is private
+			 * at this point.
+			 */
+			cond_resched_softirq_context();
 		}
 	}
 
@@ -2219,6 +2226,22 @@ static void net_tx_action(struct softirq_action *h)
 			head = head->next_sched;
 
 			root_lock = qdisc_lock(q);
+			/*
+			 * We are executing in softirq context here, and
+			 * if softirqs are preemptible, we must avoid
+			 * infinite reactivation of the softirq by
+			 * either the tx handler, or by netif_schedule().
+			 * (it would result in an infinitely looping
+			 *  softirq context)
+			 * So we take the spinlock unconditionally.
+			 */
+#ifdef CONFIG_PREEMPT_SOFTIRQS
+			spin_lock(root_lock);
+			smp_mb__before_clear_bit();
+			clear_bit(__QDISC_STATE_SCHED, &q->state);
+			qdisc_run(q);
+			spin_unlock(root_lock);
+#else
 			if (spin_trylock(root_lock)) {
 				smp_mb__before_clear_bit();
 				clear_bit(__QDISC_STATE_SCHED,
@@ -2235,6 +2258,7 @@ static void net_tx_action(struct softirq_action *h)
 						  &q->state);
 				}
 			}
+#endif
 		}
 	}
 }
@@ -2449,7 +2473,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		skb->dev = master;
 	}
 
-	__get_cpu_var(netdev_rx_stat).total++;
+	per_cpu(netdev_rx_stat, raw_smp_processor_id()).total++;
 
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
@@ -2835,9 +2859,10 @@ EXPORT_SYMBOL(napi_gro_frags);
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *queue;
 	unsigned long start_time = jiffies;
 
+	queue = &per_cpu(softnet_data, raw_smp_processor_id());
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
@@ -2869,7 +2894,7 @@ void __napi_schedule(struct napi_struct *n)
 
 	local_irq_save(flags);
 	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	raise_softirq_irqoff(NET_RX_SOFTIRQ);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
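
The change from __raise_softirq_irqoff() to raise_softirq_irqoff() here (and at softnet_break in net_rx_action below) matters once softirqs can run in threads: setting the pending bit alone may never be noticed if no irq_exit() follows, so ksoftirqd has to be woken explicitly. Mainline raise_softirq_irqoff() of that era behaves roughly as follows:

inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);	/* mark softirq nr pending */

	/*
	 * Outside of hard/soft interrupt context there is no irq_exit()
	 * coming to run the pending softirq, so wake up ksoftirqd.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}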
@@ -3023,7 +3048,7 @@ out:
 
 softnet_break:
 	__get_cpu_var(netdev_rx_stat).time_squeeze++;
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	raise_softirq_irqoff(NET_RX_SOFTIRQ);
 	goto out;
 }
 
@@ -4855,7 +4880,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
-	dev_queue->xmit_lock_owner = -1;
+	dev_queue->xmit_lock_owner = (void *)-1;
 }
 
 static void netdev_init_queue_locks(struct net_device *dev)
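
Initialising xmit_lock_owner to (void *)-1 instead of -1 indicates the owner is now tracked as a pointer (presumably the owning task) rather than a CPU number, which is what lets the recursion check above work with threaded interrupts. A purely hypothetical sketch of the matching lock/unlock bookkeeping, with names assumed rather than taken from the patch:

/* Hypothetical sketch - helper names and layout are assumed. */
#define XMIT_OWNER_NONE	((void *)-1)

static inline void my_netif_tx_lock(struct netdev_queue *txq)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = (void *)current;	/* record the owning task */
}

static inline void my_netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = XMIT_OWNER_NONE;	/* no owner */
	spin_unlock(&txq->_xmit_lock);
}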