aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2015-04-10 17:07:54 -0400
committerDavid S. Miller <davem@davemloft.net>2015-04-13 13:34:40 -0400
commit4577139b2dabf58973d59d157aae4ddd3bde863a (patch)
treee3ddced4285dbca3263f5e9e65c69550f8184e88 /net
parentdfc96c192ad48a16b0d5bba43165d9893a00fe37 (diff)
net: use jump label patching for ingress qdisc in __netif_receive_skb_core
Even if we make use of classifier and actions from the egress path, we're going into handle_ing() executing additional code on a per-packet cost for ingress qdisc, just to realize that nothing is attached on ingress. Instead, this can just be blinded out as a no-op entirely with the use of a static key. On input fast-path, we already make use of static keys in various places, e.g. skb time stamping, in RPS, etc. It makes sense to not waste time when we're assured that no ingress qdisc is attached anywhere. Enabling/disabling of that code path is being done via two helpers, namely net_{inc,dec}_ingress_queue(), that are being invoked under RTNL mutex when an ingress qdisc is being either initialized or destructed. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@plumgrid.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/dev.c31
-rw-r--r--net/sched/sch_ingress.c9
2 files changed, 33 insertions, 7 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index b2775f06c710..af4a1b0adc10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1630,6 +1630,22 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1630} 1630}
1631EXPORT_SYMBOL(call_netdevice_notifiers); 1631EXPORT_SYMBOL(call_netdevice_notifiers);
1632 1632
1633#ifdef CONFIG_NET_CLS_ACT
1634static struct static_key ingress_needed __read_mostly;
1635
1636void net_inc_ingress_queue(void)
1637{
1638 static_key_slow_inc(&ingress_needed);
1639}
1640EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1641
1642void net_dec_ingress_queue(void)
1643{
1644 static_key_slow_dec(&ingress_needed);
1645}
1646EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1647#endif
1648
1633static struct static_key netstamp_needed __read_mostly; 1649static struct static_key netstamp_needed __read_mostly;
1634#ifdef HAVE_JUMP_LABEL 1650#ifdef HAVE_JUMP_LABEL
1635/* We are not allowed to call static_key_slow_dec() from irq context 1651/* We are not allowed to call static_key_slow_dec() from irq context
@@ -3547,7 +3563,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3547 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3563 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3548 3564
3549 if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc) 3565 if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
3550 goto out; 3566 return skb;
3551 3567
3552 if (*pt_prev) { 3568 if (*pt_prev) {
3553 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3569 *ret = deliver_skb(skb, *pt_prev, orig_dev);
@@ -3561,8 +3577,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3561 return NULL; 3577 return NULL;
3562 } 3578 }
3563 3579
3564out:
3565 skb->tc_verd = 0;
3566 return skb; 3580 return skb;
3567} 3581}
3568#endif 3582#endif
@@ -3698,12 +3712,15 @@ another_round:
3698 3712
3699skip_taps: 3713skip_taps:
3700#ifdef CONFIG_NET_CLS_ACT 3714#ifdef CONFIG_NET_CLS_ACT
3701 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3715 if (static_key_false(&ingress_needed)) {
3702 if (!skb) 3716 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3703 goto unlock; 3717 if (!skb)
3718 goto unlock;
3719 }
3720
3721 skb->tc_verd = 0;
3704ncls: 3722ncls:
3705#endif 3723#endif
3706
3707 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 3724 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3708 goto drop; 3725 goto drop;
3709 3726
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index eb5b8445fef9..4cdbfb85686a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -88,11 +88,19 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
88 88
89/* ------------------------------------------------------------- */ 89/* ------------------------------------------------------------- */
90 90
91static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
92{
93 net_inc_ingress_queue();
94
95 return 0;
96}
97
91static void ingress_destroy(struct Qdisc *sch) 98static void ingress_destroy(struct Qdisc *sch)
92{ 99{
93 struct ingress_qdisc_data *p = qdisc_priv(sch); 100 struct ingress_qdisc_data *p = qdisc_priv(sch);
94 101
95 tcf_destroy_chain(&p->filter_list); 102 tcf_destroy_chain(&p->filter_list);
103 net_dec_ingress_queue();
96} 104}
97 105
98static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) 106static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -124,6 +132,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
124 .id = "ingress", 132 .id = "ingress",
125 .priv_size = sizeof(struct ingress_qdisc_data), 133 .priv_size = sizeof(struct ingress_qdisc_data),
126 .enqueue = ingress_enqueue, 134 .enqueue = ingress_enqueue,
135 .init = ingress_init,
127 .destroy = ingress_destroy, 136 .destroy = ingress_destroy,
128 .dump = ingress_dump, 137 .dump = ingress_dump,
129 .owner = THIS_MODULE, 138 .owner = THIS_MODULE,