aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-11-16 22:13:26 -0500
committerDavid S. Miller <davem@davemloft.net>2011-11-17 17:06:08 -0500
commitadc9300e78e6091a7eaa1821213836379d4dbaa8 (patch)
treeb2d075990b32e8c21129851bf1b1adf4cdc441f1 /net/core/dev.c
parentd6f144830bdfa5fcf116e9ab8fc6a60d23fa623d (diff)
net: use jump_label to shortcut RPS if not setup
Most machines don't use RPS/RFS, and pay a fair amount of instructions in netif_receive_skb() / netif_rx() / get_rps_cpu() just to discover RPS/RFS is not setup. Add a jump_label named rps_needed. If no device rps_map or global rps_sock_flow_table is setup, netif_receive_skb() / netif_rx() do a single instruction instead of many ones, including conditional jumps. jmp +0 (if CONFIG_JUMP_LABEL=y) Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> CC: Tom Herbert <therbert@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c21
1 file changed, 9 insertions, 12 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 26c49d55e79d..f78959996148 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2711,6 +2711,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
2711struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2711struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2712EXPORT_SYMBOL(rps_sock_flow_table); 2712EXPORT_SYMBOL(rps_sock_flow_table);
2713 2713
2714struct jump_label_key rps_needed __read_mostly;
2715
2714static struct rps_dev_flow * 2716static struct rps_dev_flow *
2715set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2717set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2716 struct rps_dev_flow *rflow, u16 next_cpu) 2718 struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2994,7 +2996,7 @@ int netif_rx(struct sk_buff *skb)
2994 2996
2995 trace_netif_rx(skb); 2997 trace_netif_rx(skb);
2996#ifdef CONFIG_RPS 2998#ifdef CONFIG_RPS
2997 { 2999 if (static_branch(&rps_needed)) {
2998 struct rps_dev_flow voidflow, *rflow = &voidflow; 3000 struct rps_dev_flow voidflow, *rflow = &voidflow;
2999 int cpu; 3001 int cpu;
3000 3002
@@ -3009,14 +3011,13 @@ int netif_rx(struct sk_buff *skb)
3009 3011
3010 rcu_read_unlock(); 3012 rcu_read_unlock();
3011 preempt_enable(); 3013 preempt_enable();
3012 } 3014 } else
3013#else 3015#endif
3014 { 3016 {
3015 unsigned int qtail; 3017 unsigned int qtail;
3016 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 3018 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3017 put_cpu(); 3019 put_cpu();
3018 } 3020 }
3019#endif
3020 return ret; 3021 return ret;
3021} 3022}
3022EXPORT_SYMBOL(netif_rx); 3023EXPORT_SYMBOL(netif_rx);
@@ -3359,7 +3360,7 @@ int netif_receive_skb(struct sk_buff *skb)
3359 return NET_RX_SUCCESS; 3360 return NET_RX_SUCCESS;
3360 3361
3361#ifdef CONFIG_RPS 3362#ifdef CONFIG_RPS
3362 { 3363 if (static_branch(&rps_needed)) {
3363 struct rps_dev_flow voidflow, *rflow = &voidflow; 3364 struct rps_dev_flow voidflow, *rflow = &voidflow;
3364 int cpu, ret; 3365 int cpu, ret;
3365 3366
@@ -3370,16 +3371,12 @@ int netif_receive_skb(struct sk_buff *skb)
3370 if (cpu >= 0) { 3371 if (cpu >= 0) {
3371 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3372 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3372 rcu_read_unlock(); 3373 rcu_read_unlock();
3373 } else { 3374 return ret;
3374 rcu_read_unlock();
3375 ret = __netif_receive_skb(skb);
3376 } 3375 }
3377 3376 rcu_read_unlock();
3378 return ret;
3379 } 3377 }
3380#else
3381 return __netif_receive_skb(skb);
3382#endif 3378#endif
3379 return __netif_receive_skb(skb);
3383} 3380}
3384EXPORT_SYMBOL(netif_receive_skb); 3381EXPORT_SYMBOL(netif_receive_skb);
3385 3382