aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-04-15 03:14:07 -0400
committerDavid S. Miller <davem@davemloft.net>2010-04-15 03:14:07 -0400
commitb0e28f1effd1d840b36e961edc1def81e01b1ca1 (patch)
tree41e4299d3039dda9e3977c1a8625d09554e6fe4e /net
parentfea069152614cdeefba4b2bf80afcddb9c217fc8 (diff)
net: netif_rx() must disable preemption
Eric Paris reported netif_rx() is calling smp_processor_id() from preemptible context, in particular when the caller is ip_dev_loopback_xmit(). The RPS commit added this smp_processor_id() call; this patch makes sure preemption is disabled. rps_get_cpus() wants rcu_read_lock() anyway, so we can do it a bit earlier. Reported-by: Eric Paris <eparis@redhat.com> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/dev.c25
1 file changed, 15 insertions, 10 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 876b1112d5ba..e8041eb76ac1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2206,6 +2206,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2206/* 2206/*
2207 * get_rps_cpu is called from netif_receive_skb and returns the target 2207 * get_rps_cpu is called from netif_receive_skb and returns the target
2208 * CPU from the RPS map of the receiving queue for a given skb. 2208 * CPU from the RPS map of the receiving queue for a given skb.
2209 * rcu_read_lock must be held on entry.
2209 */ 2210 */
2210static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) 2211static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2211{ 2212{
@@ -2217,8 +2218,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2217 u8 ip_proto; 2218 u8 ip_proto;
2218 u32 addr1, addr2, ports, ihl; 2219 u32 addr1, addr2, ports, ihl;
2219 2220
2220 rcu_read_lock();
2221
2222 if (skb_rx_queue_recorded(skb)) { 2221 if (skb_rx_queue_recorded(skb)) {
2223 u16 index = skb_get_rx_queue(skb); 2222 u16 index = skb_get_rx_queue(skb);
2224 if (unlikely(index >= dev->num_rx_queues)) { 2223 if (unlikely(index >= dev->num_rx_queues)) {
@@ -2296,7 +2295,6 @@ got_hash:
2296 } 2295 }
2297 2296
2298done: 2297done:
2299 rcu_read_unlock();
2300 return cpu; 2298 return cpu;
2301} 2299}
2302 2300
@@ -2392,7 +2390,7 @@ enqueue:
2392 2390
2393int netif_rx(struct sk_buff *skb) 2391int netif_rx(struct sk_buff *skb)
2394{ 2392{
2395 int cpu; 2393 int ret;
2396 2394
2397 /* if netpoll wants it, pretend we never saw it */ 2395 /* if netpoll wants it, pretend we never saw it */
2398 if (netpoll_rx(skb)) 2396 if (netpoll_rx(skb))
@@ -2402,14 +2400,21 @@ int netif_rx(struct sk_buff *skb)
2402 net_timestamp(skb); 2400 net_timestamp(skb);
2403 2401
2404#ifdef CONFIG_RPS 2402#ifdef CONFIG_RPS
2405 cpu = get_rps_cpu(skb->dev, skb); 2403 {
2406 if (cpu < 0) 2404 int cpu;
2407 cpu = smp_processor_id(); 2405
2406 rcu_read_lock();
2407 cpu = get_rps_cpu(skb->dev, skb);
2408 if (cpu < 0)
2409 cpu = smp_processor_id();
2410 ret = enqueue_to_backlog(skb, cpu);
2411 rcu_read_unlock();
2412 }
2408#else 2413#else
2409 cpu = smp_processor_id(); 2414 ret = enqueue_to_backlog(skb, get_cpu());
2415 put_cpu();
2410#endif 2416#endif
2411 2417 return ret;
2412 return enqueue_to_backlog(skb, cpu);
2413} 2418}
2414EXPORT_SYMBOL(netif_rx); 2419EXPORT_SYMBOL(netif_rx);
2415 2420