Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c      | 44
-rw-r--r--  net/sched/act_bpf.c      | 53
-rw-r--r--  net/sched/act_connmark.c |  3
-rw-r--r--  net/sched/act_csum.c     |  3
-rw-r--r--  net/sched/act_gact.c     | 44
-rw-r--r--  net/sched/act_ipt.c      |  2
-rw-r--r--  net/sched/act_mirred.c   | 58
-rw-r--r--  net/sched/act_nat.c      |  3
-rw-r--r--  net/sched/act_pedit.c    |  3
-rw-r--r--  net/sched/act_simple.c   |  3
-rw-r--r--  net/sched/act_skbedit.c  |  3
-rw-r--r--  net/sched/act_vlan.c     |  3
-rw-r--r--  net/sched/cls_cgroup.c   | 23
-rw-r--r--  net/sched/sch_qfq.c      |  1
14 files changed, 135 insertions(+), 111 deletions(-)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 43ec92680ae8..b087087ccfa9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,6 +27,15 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
+static void free_tcf(struct rcu_head *head)
+{
+	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+	free_percpu(p->cpu_bstats);
+	free_percpu(p->cpu_qstats);
+	kfree(p);
+}
+
 void tcf_hash_destroy(struct tc_action *a)
 {
 	struct tcf_common *p = a->priv;
@@ -41,7 +50,7 @@ void tcf_hash_destroy(struct tc_action *a)
 	 * gen_estimator est_timer() might access p->tcfc_lock
 	 * or bstats, wait a RCU grace period before freeing p
 	 */
-	kfree_rcu(p, tcfc_rcu);
+	call_rcu(&p->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
@@ -231,15 +240,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
 	if (est)
 		gen_kill_estimator(&pc->tcfc_bstats,
 				   &pc->tcfc_rate_est);
-	kfree_rcu(pc, tcfc_rcu);
+	call_rcu(&pc->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-		    int size, int bind)
+		    int size, int bind, bool cpustats)
 {
 	struct tcf_hashinfo *hinfo = a->ops->hinfo;
 	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+	int err = -ENOMEM;
 
 	if (unlikely(!p))
 		return -ENOMEM;
@@ -247,18 +257,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
 	if (bind)
 		p->tcfc_bindcnt = 1;
 
+	if (cpustats) {
+		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+		if (!p->cpu_bstats) {
+err1:
+			kfree(p);
+			return err;
+		}
+		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!p->cpu_qstats) {
+err2:
+			free_percpu(p->cpu_bstats);
+			goto err1;
+		}
+	}
 	spin_lock_init(&p->tcfc_lock);
 	INIT_HLIST_NODE(&p->tcfc_head);
 	p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
 	p->tcfc_tm.install = jiffies;
 	p->tcfc_tm.lastuse = jiffies;
 	if (est) {
-		int err = gen_new_estimator(&p->tcfc_bstats, NULL,
-					    &p->tcfc_rate_est,
-					    &p->tcfc_lock, est);
+		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+					&p->tcfc_rate_est,
+					&p->tcfc_lock, est);
 		if (err) {
-			kfree(p);
-			return err;
+			free_percpu(p->cpu_qstats);
+			goto err2;
 		}
 	}
 
@@ -616,10 +640,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 	if (err < 0)
 		goto errout;
 
-	if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+	if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
 				     &p->tcfc_rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, NULL,
+	    gnet_stats_copy_queue(&d, p->cpu_qstats,
 				  &p->tcfc_qstats,
 				  p->tcfc_qstats.qlen) < 0)
 		goto errout;
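The act_api.c hunks above add optional per-CPU counters (cpu_bstats/cpu_qstats) that tcf_hash_create() allocates when a caller passes cpustats=true and that tcf_action_copy_stats() folds back into one total. As a rough illustration of why that helps, here is a minimal userspace sketch of the same idea; the slot layout and names (pkt_counter, NSLOTS, worker) are invented for this example and are not part of the patch.

/*
 * Illustrative userspace analogue of per-CPU stats (NOT kernel code):
 * each context bumps its own counter slot without a shared lock, and a
 * reader folds the slots together only when totals are requested.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4	/* stand-in for "one slot per CPU" */

struct pkt_counter {
	uint64_t packets;
	char pad[64 - sizeof(uint64_t)];	/* keep slots on separate cache lines */
};

static struct pkt_counter slots[NSLOTS];

static void *worker(void *arg)
{
	struct pkt_counter *mine = arg;		/* private slot, so no lock needed */

	for (int i = 0; i < 1000000; i++)
		mine->packets++;
	return NULL;
}

int main(void)
{
	pthread_t tid[NSLOTS];
	uint64_t total = 0;

	for (int i = 0; i < NSLOTS; i++)
		pthread_create(&tid[i], NULL, worker, &slots[i]);
	for (int i = 0; i < NSLOTS; i++)
		pthread_join(tid[i], NULL);

	/* "copy stats": sum the slots only when someone asks for totals */
	for (int i = 0; i < NSLOTS; i++)
		total += slots[i].packets;
	printf("packets: %llu\n", (unsigned long long)total);
	return 0;
}

Each context owns its slot, so the hot path takes no shared lock; the summing cost is paid only when the totals are read.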
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d0edeb7a1950..1b97dabc621a 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -278,7 +278,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	struct tc_act_bpf *parm;
 	struct tcf_bpf *prog;
 	bool is_bpf, is_ebpf;
-	int ret;
+	int ret, res = 0;
 
 	if (!nla)
 		return -EINVAL;
@@ -287,41 +287,43 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	if (ret < 0)
 		return ret;
 
-	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
-	is_ebpf = tb[TCA_ACT_BPF_FD];
-
-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
-	    !tb[TCA_ACT_BPF_PARMS])
+	if (!tb[TCA_ACT_BPF_PARMS])
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
 
-	memset(&cfg, 0, sizeof(cfg));
-
-	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
-		       tcf_bpf_init_from_efd(tb, &cfg);
-	if (ret < 0)
-		return ret;
-
 	if (!tcf_hash_check(parm->index, act, bind)) {
 		ret = tcf_hash_create(parm->index, est, act,
-				      sizeof(*prog), bind);
+				      sizeof(*prog), bind, false);
 		if (ret < 0)
-			goto destroy_fp;
+			return ret;
 
-		ret = ACT_P_CREATED;
+		res = ACT_P_CREATED;
 	} else {
 		/* Don't override defaults. */
 		if (bind)
-			goto destroy_fp;
+			return 0;
 
 		tcf_hash_release(act, bind);
-		if (!replace) {
-			ret = -EEXIST;
-			goto destroy_fp;
-		}
+		if (!replace)
+			return -EEXIST;
 	}
 
+	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+	is_ebpf = tb[TCA_ACT_BPF_FD];
+
+	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+
+	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
+		       tcf_bpf_init_from_efd(tb, &cfg);
+	if (ret < 0)
+		goto out;
+
 	prog = to_bpf(act);
 	spin_lock_bh(&prog->tcf_lock);
 
@@ -341,15 +343,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
 	spin_unlock_bh(&prog->tcf_lock);
 
-	if (ret == ACT_P_CREATED)
+	if (res == ACT_P_CREATED)
 		tcf_hash_insert(act);
 	else
 		tcf_bpf_cfg_cleanup(&old);
 
-	return ret;
+	return res;
+out:
+	if (res == ACT_P_CREATED)
+		tcf_hash_cleanup(act, est);
 
-destroy_fp:
-	tcf_bpf_cfg_cleanup(&cfg);
 	return ret;
 }
 
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 295d14bd6c67..f2b540220ad0 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -108,7 +108,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 4cd5cf1aedf8..b07c535ba8e7 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -62,7 +62,8 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
 	parm = nla_data(tb[TCA_CSUM_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7fffc2272701..5c1b05170736 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -28,14 +28,18 @@
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-	if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+	if (prandom_u32() % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-	if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+	u32 pack = atomic_inc_return(&gact->packets);
+
+	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+	if (pack % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
@@ -85,7 +89,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+				      bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 
 	gact = to_gact(a);
 
-	spin_lock_bh(&gact->tcf_lock);
+	ASSERT_RTNL();
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
 	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
-		gact->tcfg_pval = p_parm->pval;
+		gact->tcfg_pval = max_t(u16, 1, p_parm->pval);
+		/* Make sure tcfg_pval is written before tcfg_ptype
+		 * coupled with smp_rmb() in gact_net_rand() & gact_determ()
+		 */
+		smp_wmb();
 		gact->tcfg_ptype = p_parm->ptype;
 	}
 #endif
-	spin_unlock_bh(&gact->tcf_lock);
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(a);
 	return ret;
@@ -118,23 +126,21 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
 			struct tcf_result *res)
 {
 	struct tcf_gact *gact = a->priv;
-	int action = TC_ACT_SHOT;
+	int action = READ_ONCE(gact->tcf_action);
 
-	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype)
-		action = gact_rand[gact->tcfg_ptype](gact);
-	else
-		action = gact->tcf_action;
-#else
-	action = gact->tcf_action;
+	{
+	u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+	if (ptype)
+		action = gact_rand[ptype](gact);
+	}
 #endif
-	gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	gact->tcf_bstats.packets++;
+	bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
 	if (action == TC_ACT_SHOT)
-		gact->tcf_qstats.drops++;
-	gact->tcf_tm.lastuse = jiffies;
-	spin_unlock(&gact->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+	tcf_lastuse_update(&gact->tcf_tm);
 
 	return action;
 }
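The act_gact.c conversion above drops the per-packet spinlock and relies on write ordering instead: tcfg_pval is forced nonzero and written before tcfg_ptype (the smp_wmb() pairs with the smp_rmb() in gact_net_rand()/gact_determ()), so a reader that observes a probability type can never divide by zero. Below is a small userspace sketch of that publication order using C11 release/acquire atomics; the names (gact_like, writer, reader) are invented for illustration and this is not the kernel code itself.

/*
 * Illustrative userspace analogue (C11 atomics, NOT kernel code):
 * publish pval before ptype, so a reader that sees a nonzero ptype is
 * guaranteed to see a valid (nonzero) pval.
 */
#include <stdatomic.h>
#include <stdio.h>

struct gact_like {
	unsigned int pval;		/* plain store, ordered by the release below */
	_Atomic unsigned int ptype;	/* acts as the "published" flag */
};

static void writer(struct gact_like *g, unsigned int pval, unsigned int ptype)
{
	g->pval = pval ? pval : 1;	/* never 0, avoids division by zero */
	atomic_store_explicit(&g->ptype, ptype, memory_order_release);
}

static int reader(struct gact_like *g, unsigned int pkt)
{
	unsigned int ptype = atomic_load_explicit(&g->ptype, memory_order_acquire);

	if (!ptype)
		return 1;			/* no probability configured: primary action */
	return pkt % g->pval != 0;		/* safe: pval was published before ptype */
}

int main(void)
{
	struct gact_like g = { 0 };

	writer(&g, 4, 1);
	printf("%d %d\n", reader(&g, 7), reader(&g, 8));	/* prints "1 0" */
	return 0;
}

The release store on ptype plays the role of smp_wmb() here, and the acquire load plays the role of smp_rmb().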
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index cbc8dd7dd48a..99c9cc1c7af9 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -114,7 +114,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 	index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
 	if (!tcf_hash_check(index, a, bind) ) {
-		ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+		ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 268545050ddb..2d1be4a760fd 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -35,9 +35,11 @@ static LIST_HEAD(mirred_list);
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
+	struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
 	list_del(&m->tcfm_list);
-	if (m->tcfm_dev)
-		dev_put(m->tcfm_dev);
+	if (dev)
+		dev_put(dev);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,7 +95,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	if (!tcf_hash_check(parm->index, a, bind)) {
 		if (dev == NULL)
 			return -EINVAL;
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+				      bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -107,18 +110,18 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	}
 	m = to_mirred(a);
 
-	spin_lock_bh(&m->tcf_lock);
+	ASSERT_RTNL();
 	m->tcf_action = parm->action;
 	m->tcfm_eaction = parm->eaction;
 	if (dev != NULL) {
 		m->tcfm_ifindex = parm->ifindex;
 		if (ret != ACT_P_CREATED)
-			dev_put(m->tcfm_dev);
+			dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
 		dev_hold(dev);
-		m->tcfm_dev = dev;
+		rcu_assign_pointer(m->tcfm_dev, dev);
 		m->tcfm_ok_push = ok_push;
 	}
-	spin_unlock_bh(&m->tcf_lock);
+
 	if (ret == ACT_P_CREATED) {
 		list_add(&m->tcfm_list, &mirred_list);
 		tcf_hash_insert(a);
@@ -133,20 +136,22 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 	struct tcf_mirred *m = a->priv;
 	struct net_device *dev;
 	struct sk_buff *skb2;
+	int retval, err;
 	u32 at;
-	int retval, err = 1;
 
-	spin_lock(&m->tcf_lock);
-	m->tcf_tm.lastuse = jiffies;
-	bstats_update(&m->tcf_bstats, skb);
+	tcf_lastuse_update(&m->tcf_tm);
+
+	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
 
-	dev = m->tcfm_dev;
-	if (!dev) {
-		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+	rcu_read_lock();
+	retval = READ_ONCE(m->tcf_action);
+	dev = rcu_dereference(m->tcfm_dev);
+	if (unlikely(!dev)) {
+		pr_notice_once("tc mirred: target device is gone\n");
 		goto out;
 	}
 
-	if (!(dev->flags & IFF_UP)) {
+	if (unlikely(!(dev->flags & IFF_UP))) {
 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
 				       dev->name);
 		goto out;
@@ -154,7 +159,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
 	at = G_TC_AT(skb->tc_verd);
 	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (skb2 == NULL)
+	if (!skb2)
 		goto out;
 
 	if (!(at & AT_EGRESS)) {
@@ -170,16 +175,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 	skb2->dev = dev;
 	err = dev_queue_xmit(skb2);
 
-out:
 	if (err) {
-		m->tcf_qstats.overlimits++;
+out:
+		qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
 		if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
 			retval = TC_ACT_SHOT;
-		else
-			retval = m->tcf_action;
-	} else
-		retval = m->tcf_action;
-	spin_unlock(&m->tcf_lock);
+	}
+	rcu_read_unlock();
 
 	return retval;
 }
@@ -218,14 +220,16 @@ static int mirred_device_event(struct notifier_block *unused,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct tcf_mirred *m;
 
+	ASSERT_RTNL();
 	if (event == NETDEV_UNREGISTER)
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
-			spin_lock_bh(&m->tcf_lock);
-			if (m->tcfm_dev == dev) {
+			if (rcu_access_pointer(m->tcfm_dev) == dev) {
 				dev_put(dev);
-				m->tcfm_dev = NULL;
+				/* Note : no rcu grace period necessary, as
+				 * net_device are already rcu protected.
+				 */
+				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
-			spin_unlock_bh(&m->tcf_lock);
 		}
 
 	return NOTIFY_DONE;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 270a030d5fd0..5be0b3c1c5b0 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -55,7 +55,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 	parm = nla_data(tb[TCA_NAT_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ff8b466a73f6..e38a7701f154 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -57,7 +57,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	if (!tcf_hash_check(parm->index, a, bind)) {
 		if (!parm->nkeys)
 			return -EINVAL;
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		p = to_pedit(a);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 6a8d9488613a..d6b708d6afdf 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -103,7 +103,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 	defdata = nla_data(tb[TCA_DEF_DATA]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index fcfeeaf838be..6751b5f8c046 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index d735ecf0b1a7..796785e0bf96 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,7 +116,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	action = parm->v_action;
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index ea611b216412..4c85bd3a750c 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-	u32 classid;
-
-	classid = task_cls_state(current)->classid;
-
-	/*
-	 * Due to the nature of the classifier it is required to ignore all
-	 * packets originating from softirq context as accessing `current'
-	 * would lead to false results.
-	 *
-	 * This test assumes that all callers of dev_queue_xmit() explicitely
-	 * disable bh. Knowing this, it is possible to detect softirq based
-	 * calls by looking at the number of nested bh disable calls because
-	 * softirqs always disables bh.
-	 */
-	if (in_serving_softirq()) {
-		/* If there is an sk_classid we'll use that. */
-		if (!skb->sk)
-			return -1;
-		classid = skb->sk->sk_classid;
-	}
+	u32 classid = task_get_classid(skb);
 
 	if (!classid)
 		return -1;
-
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
 		return -1;
 
 	res->classid = classid;
 	res->class = 0;
+
 	return tcf_exts_exec(skb, &head->exts, res);
 }
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b8d73bca683c..ffaeea63d473 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -186,7 +186,6 @@ struct qfq_sched {
 
 	u64 oldV, V;			/* Precise virtual times. */
 	struct qfq_aggregate *in_serv_agg;	/* Aggregate being served. */
-	u32 num_active_agg;		/* Num. of active aggregates */
 	u32 wsum;			/* weight sum */
 	u32 iwsum;			/* inverse weight sum */
 