about summary refs log tree commit diff stats
path: root/net/sched
diff options
context:
space:
mode:
author	Eric Dumazet <edumazet@google.com>	2015-07-06 08:18:08 -0400
committer	David S. Miller <davem@davemloft.net>	2015-07-08 16:50:42 -0400
commit	56e5d1ca183d8616fab377d7d466c244b4dbb3b9 (patch)
tree	9896c9a5de0b99e32c2d3a06f5a0c4503b1fffbd /net/sched
parent	8f2ae965b7ef4f4ddab6110f06388e270723d694 (diff)
net_sched: act_gact: remove spinlock in fast path
Final step for gact RCU operation:

1) Use percpu stats
2) update lastuse only every clock tick to avoid false sharing
3) Remove spinlock acquisition, as it is no longer needed.

Since this is the last contended lock in packet RX when tc gact is used,
this gives impressive gain.

My host with 8 RX queues was handling 5 Mpps before the patch,
and more than 11 Mpps after patch.

Tested:

On receiver:

dev=eth0
tc qdisc del dev $dev ingress 2>/dev/null
tc qdisc add dev $dev ingress
tc filter del dev $dev root pref 10 2>/dev/null
tc filter del dev $dev pref 10 2>/dev/null
tc filter add dev $dev est 1sec 4sec parent ffff: protocol ip prio 1 \
	u32 match ip src 7.0.0.0/8 flowid 1:15 action drop

Sender sends packets flood from 7/8 network

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/act_gact.c	17
1 file changed, 7 insertions, 10 deletions
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e4eb88d3d8dc..5c1b05170736 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -90,7 +90,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
90 90
91 if (!tcf_hash_check(parm->index, a, bind)) { 91 if (!tcf_hash_check(parm->index, a, bind)) {
92 ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), 92 ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
93 bind, false); 93 bind, true);
94 if (ret) 94 if (ret)
95 return ret; 95 return ret;
96 ret = ACT_P_CREATED; 96 ret = ACT_P_CREATED;
@@ -104,7 +104,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
104 104
105 gact = to_gact(a); 105 gact = to_gact(a);
106 106
107 spin_lock_bh(&gact->tcf_lock); 107 ASSERT_RTNL();
108 gact->tcf_action = parm->action; 108 gact->tcf_action = parm->action;
109#ifdef CONFIG_GACT_PROB 109#ifdef CONFIG_GACT_PROB
110 if (p_parm) { 110 if (p_parm) {
@@ -117,7 +117,6 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
117 gact->tcfg_ptype = p_parm->ptype; 117 gact->tcfg_ptype = p_parm->ptype;
118 } 118 }
119#endif 119#endif
120 spin_unlock_bh(&gact->tcf_lock);
121 if (ret == ACT_P_CREATED) 120 if (ret == ACT_P_CREATED)
122 tcf_hash_insert(a); 121 tcf_hash_insert(a);
123 return ret; 122 return ret;
@@ -127,9 +126,8 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
127 struct tcf_result *res) 126 struct tcf_result *res)
128{ 127{
129 struct tcf_gact *gact = a->priv; 128 struct tcf_gact *gact = a->priv;
130 int action = gact->tcf_action; 129 int action = READ_ONCE(gact->tcf_action);
131 130
132 spin_lock(&gact->tcf_lock);
133#ifdef CONFIG_GACT_PROB 131#ifdef CONFIG_GACT_PROB
134 { 132 {
135 u32 ptype = READ_ONCE(gact->tcfg_ptype); 133 u32 ptype = READ_ONCE(gact->tcfg_ptype);
@@ -138,12 +136,11 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
138 action = gact_rand[ptype](gact); 136 action = gact_rand[ptype](gact);
139 } 137 }
140#endif 138#endif
141 gact->tcf_bstats.bytes += qdisc_pkt_len(skb); 139 bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
142 gact->tcf_bstats.packets++;
143 if (action == TC_ACT_SHOT) 140 if (action == TC_ACT_SHOT)
144 gact->tcf_qstats.drops++; 141 qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
145 gact->tcf_tm.lastuse = jiffies; 142
146 spin_unlock(&gact->tcf_lock); 143 tcf_lastuse_update(&gact->tcf_tm);
147 144
148 return action; 145 return action;
149} 146}