author | John Fastabend <john.fastabend@gmail.com> | 2014-09-12 23:05:27 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-13 12:30:25 -0400 |
commit | 25d8c0d55f241ce2d360df1bea48e23a55836ee6 (patch) | |
tree | c0aca67607e7ce560a4a2cebef5fb6d55adf4112 /net | |
parent | 46e5da40aec256155cfedee96dd21a75da941f2c (diff) | |
net: rcu-ify tcf_proto
rcu'ify tcf_proto; this allows calling tc_classify() without holding
any locks. Updaters are protected by RTNL.

This patch prepares the core net_sched infrastructure for running
the classifier/action chains without holding the qdisc lock; however,
it does nothing to ensure that cls_xxx and act_xxx types also work
without locking. Additional patches are required to address the fallout.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
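To make the mechanics easier to follow before diving into the diff, here is a minimal sketch of the pattern the patch applies, using simplified, hypothetical names (classify_chain, chain_insert) rather than the exact kernel code; the rcu_head member used by kfree_rcu() is assumed to be added on the include/ side of the change, which the net/-limited diffstat below does not show:

```c
/*
 * Illustrative sketch only -- simplified names, not the exact kernel code.
 * Readers walk the classifier chain under RCU-bh with no qdisc lock;
 * updaters are serialized by RTNL, so plain RCU publish is sufficient.
 */
struct tcf_proto {
	struct tcf_proto __rcu	*next;	/* chain link, now RCU-protected */
	struct rcu_head		rcu;	/* assumed: enables kfree_rcu() */
	/* ... */
};

/* Datapath (softirq/BH context): lockless chain walk. */
static int classify_chain(struct sk_buff *skb, struct tcf_proto *tp,
			  struct tcf_result *res)
{
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		/* ... match skb against tp, fill *res ... */
	}
	return -1;
}

/* Control path (holds RTNL): publish a new proto at the head of a chain. */
static void chain_insert(struct tcf_proto __rcu **chain, struct tcf_proto *tp)
{
	RCU_INIT_POINTER(tp->next, rtnl_dereference(*chain));
	rcu_assign_pointer(*chain, tp);
}
```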
Diffstat (limited to 'net')
-rw-r--r-- | net/sched/cls_api.c | 30 |
-rw-r--r-- | net/sched/sch_api.c | 10 |
-rw-r--r-- | net/sched/sch_atm.c | 20 |
-rw-r--r-- | net/sched/sch_cbq.c | 11 |
-rw-r--r-- | net/sched/sch_choke.c | 15 |
-rw-r--r-- | net/sched/sch_drr.c | 9 |
-rw-r--r-- | net/sched/sch_dsmark.c | 9 |
-rw-r--r-- | net/sched/sch_fq_codel.c | 11 |
-rw-r--r-- | net/sched/sch_hfsc.c | 8 |
-rw-r--r-- | net/sched/sch_htb.c | 15 |
-rw-r--r-- | net/sched/sch_ingress.c | 8 |
-rw-r--r-- | net/sched/sch_multiq.c | 8 |
-rw-r--r-- | net/sched/sch_prio.c | 11 |
-rw-r--r-- | net/sched/sch_qfq.c | 9 |
-rw-r--r-- | net/sched/sch_sfb.c | 15 |
-rw-r--r-- | net/sched/sch_sfq.c | 11 |
16 files changed, 116 insertions, 84 deletions
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index c28b0d327b12..e547efdaba9d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -117,7 +117,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) | |||
117 | { | 117 | { |
118 | struct net *net = sock_net(skb->sk); | 118 | struct net *net = sock_net(skb->sk); |
119 | struct nlattr *tca[TCA_MAX + 1]; | 119 | struct nlattr *tca[TCA_MAX + 1]; |
120 | spinlock_t *root_lock; | ||
121 | struct tcmsg *t; | 120 | struct tcmsg *t; |
122 | u32 protocol; | 121 | u32 protocol; |
123 | u32 prio; | 122 | u32 prio; |
@@ -125,7 +124,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) | |||
125 | u32 parent; | 124 | u32 parent; |
126 | struct net_device *dev; | 125 | struct net_device *dev; |
127 | struct Qdisc *q; | 126 | struct Qdisc *q; |
128 | struct tcf_proto **back, **chain; | 127 | struct tcf_proto __rcu **back; |
128 | struct tcf_proto __rcu **chain; | ||
129 | struct tcf_proto *tp; | 129 | struct tcf_proto *tp; |
130 | const struct tcf_proto_ops *tp_ops; | 130 | const struct tcf_proto_ops *tp_ops; |
131 | const struct Qdisc_class_ops *cops; | 131 | const struct Qdisc_class_ops *cops; |
@@ -197,7 +197,9 @@ replay: | |||
197 | goto errout; | 197 | goto errout; |
198 | 198 | ||
199 | /* Check the chain for existence of proto-tcf with this priority */ | 199 | /* Check the chain for existence of proto-tcf with this priority */ |
200 | for (back = chain; (tp = *back) != NULL; back = &tp->next) { | 200 | for (back = chain; |
201 | (tp = rtnl_dereference(*back)) != NULL; | ||
202 | back = &tp->next) { | ||
201 | if (tp->prio >= prio) { | 203 | if (tp->prio >= prio) { |
202 | if (tp->prio == prio) { | 204 | if (tp->prio == prio) { |
203 | if (!nprio || | 205 | if (!nprio || |
@@ -209,8 +211,6 @@ replay: | |||
209 | } | 211 | } |
210 | } | 212 | } |
211 | 213 | ||
212 | root_lock = qdisc_root_sleeping_lock(q); | ||
213 | |||
214 | if (tp == NULL) { | 214 | if (tp == NULL) { |
215 | /* Proto-tcf does not exist, create new one */ | 215 | /* Proto-tcf does not exist, create new one */ |
216 | 216 | ||
@@ -259,7 +259,8 @@ replay: | |||
259 | } | 259 | } |
260 | tp->ops = tp_ops; | 260 | tp->ops = tp_ops; |
261 | tp->protocol = protocol; | 261 | tp->protocol = protocol; |
262 | tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back)); | 262 | tp->prio = nprio ? : |
263 | TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); | ||
263 | tp->q = q; | 264 | tp->q = q; |
264 | tp->classify = tp_ops->classify; | 265 | tp->classify = tp_ops->classify; |
265 | tp->classid = parent; | 266 | tp->classid = parent; |
@@ -280,9 +281,9 @@ replay: | |||
280 | 281 | ||
281 | if (fh == 0) { | 282 | if (fh == 0) { |
282 | if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { | 283 | if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { |
283 | spin_lock_bh(root_lock); | 284 | struct tcf_proto *next = rtnl_dereference(tp->next); |
284 | *back = tp->next; | 285 | |
285 | spin_unlock_bh(root_lock); | 286 | RCU_INIT_POINTER(*back, next); |
286 | 287 | ||
287 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); | 288 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
288 | tcf_destroy(tp); | 289 | tcf_destroy(tp); |
@@ -322,10 +323,8 @@ replay: | |||
322 | n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE); | 323 | n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE); |
323 | if (err == 0) { | 324 | if (err == 0) { |
324 | if (tp_created) { | 325 | if (tp_created) { |
325 | spin_lock_bh(root_lock); | 326 | RCU_INIT_POINTER(tp->next, rtnl_dereference(*back)); |
326 | tp->next = *back; | 327 | rcu_assign_pointer(*back, tp); |
327 | *back = tp; | ||
328 | spin_unlock_bh(root_lock); | ||
329 | } | 328 | } |
330 | tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); | 329 | tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
331 | } else { | 330 | } else { |
@@ -420,7 +419,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
420 | int s_t; | 419 | int s_t; |
421 | struct net_device *dev; | 420 | struct net_device *dev; |
422 | struct Qdisc *q; | 421 | struct Qdisc *q; |
423 | struct tcf_proto *tp, **chain; | 422 | struct tcf_proto *tp, __rcu **chain; |
424 | struct tcmsg *tcm = nlmsg_data(cb->nlh); | 423 | struct tcmsg *tcm = nlmsg_data(cb->nlh); |
425 | unsigned long cl = 0; | 424 | unsigned long cl = 0; |
426 | const struct Qdisc_class_ops *cops; | 425 | const struct Qdisc_class_ops *cops; |
@@ -454,7 +453,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
454 | 453 | ||
455 | s_t = cb->args[0]; | 454 | s_t = cb->args[0]; |
456 | 455 | ||
457 | for (tp = *chain, t = 0; tp; tp = tp->next, t++) { | 456 | for (tp = rtnl_dereference(*chain), t = 0; |
457 | tp; tp = rtnl_dereference(tp->next), t++) { | ||
458 | if (t < s_t) | 458 | if (t < s_t) |
459 | continue; | 459 | continue; |
460 | if (TC_H_MAJ(tcm->tcm_info) && | 460 | if (TC_H_MAJ(tcm->tcm_info) && |
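The tc_ctl_tfilter() hunks above drop the root_lock spinlock entirely: with writers serialized by RTNL, unlinking a filter only needs an RCU-safe pointer update before the object is handed to tcf_destroy(). A minimal sketch of that delete path, with a hypothetical chain_unlink() helper standing in for the open-coded logic:

```c
/* Sketch of the RTNL-protected delete path (simplified, not exact code). */
static void chain_unlink(struct tcf_proto __rcu **back, struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(tp->next);

	RCU_INIT_POINTER(*back, next);	/* new readers now skip tp */
	tcf_destroy(tp);		/* frees via kfree_rcu(), see below */
}
```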
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 58bed7599db7..ca6248345937 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1781,7 +1781,7 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, | |||
1781 | __be16 protocol = skb->protocol; | 1781 | __be16 protocol = skb->protocol; |
1782 | int err; | 1782 | int err; |
1783 | 1783 | ||
1784 | for (; tp; tp = tp->next) { | 1784 | for (; tp; tp = rcu_dereference_bh(tp->next)) { |
1785 | if (tp->protocol != protocol && | 1785 | if (tp->protocol != protocol && |
1786 | tp->protocol != htons(ETH_P_ALL)) | 1786 | tp->protocol != htons(ETH_P_ALL)) |
1787 | continue; | 1787 | continue; |
@@ -1833,15 +1833,15 @@ void tcf_destroy(struct tcf_proto *tp) | |||
1833 | { | 1833 | { |
1834 | tp->ops->destroy(tp); | 1834 | tp->ops->destroy(tp); |
1835 | module_put(tp->ops->owner); | 1835 | module_put(tp->ops->owner); |
1836 | kfree(tp); | 1836 | kfree_rcu(tp, rcu); |
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | void tcf_destroy_chain(struct tcf_proto **fl) | 1839 | void tcf_destroy_chain(struct tcf_proto __rcu **fl) |
1840 | { | 1840 | { |
1841 | struct tcf_proto *tp; | 1841 | struct tcf_proto *tp; |
1842 | 1842 | ||
1843 | while ((tp = *fl) != NULL) { | 1843 | while ((tp = rtnl_dereference(*fl)) != NULL) { |
1844 | *fl = tp->next; | 1844 | RCU_INIT_POINTER(*fl, tp->next); |
1845 | tcf_destroy(tp); | 1845 | tcf_destroy(tp); |
1846 | } | 1846 | } |
1847 | } | 1847 | } |
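Because tcf_destroy() now frees with kfree_rcu() instead of kfree(), a filter unlinked by the control path is only reclaimed after a grace period, so a datapath reader that already picked up the old pointer can finish its chain walk safely. A hedged sketch of what a lockless caller might look like, assuming (as qdisc enqueue paths do) that it already runs in BH context under rcu_read_lock_bh(); example_priv and classify_no_lock() are hypothetical names, not part of the patch:

```c
/* Hypothetical helper: classify without taking the qdisc lock. */
struct example_priv {
	struct tcf_proto __rcu *filter_list;
};

static int classify_no_lock(struct sk_buff *skb, struct example_priv *p,
			    struct tcf_result *res)
{
	struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);

	if (!fl)
		return -1;	/* no filters attached */
	return tc_classify(skb, fl, res);
}
```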
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 8449b337f9e3..c398f9c3dbdd 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | struct atm_flow_data { | 42 | struct atm_flow_data { |
43 | struct Qdisc *q; /* FIFO, TBF, etc. */ | 43 | struct Qdisc *q; /* FIFO, TBF, etc. */ |
44 | struct tcf_proto *filter_list; | 44 | struct tcf_proto __rcu *filter_list; |
45 | struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ | 45 | struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ |
46 | void (*old_pop)(struct atm_vcc *vcc, | 46 | void (*old_pop)(struct atm_vcc *vcc, |
47 | struct sk_buff *skb); /* chaining */ | 47 | struct sk_buff *skb); /* chaining */ |
@@ -273,7 +273,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
273 | error = -ENOBUFS; | 273 | error = -ENOBUFS; |
274 | goto err_out; | 274 | goto err_out; |
275 | } | 275 | } |
276 | flow->filter_list = NULL; | 276 | RCU_INIT_POINTER(flow->filter_list, NULL); |
277 | flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); | 277 | flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); |
278 | if (!flow->q) | 278 | if (!flow->q) |
279 | flow->q = &noop_qdisc; | 279 | flow->q = &noop_qdisc; |
@@ -311,7 +311,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) | |||
311 | pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); | 311 | pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); |
312 | if (list_empty(&flow->list)) | 312 | if (list_empty(&flow->list)) |
313 | return -EINVAL; | 313 | return -EINVAL; |
314 | if (flow->filter_list || flow == &p->link) | 314 | if (rcu_access_pointer(flow->filter_list) || flow == &p->link) |
315 | return -EBUSY; | 315 | return -EBUSY; |
316 | /* | 316 | /* |
317 | * Reference count must be 2: one for "keepalive" (set at class | 317 | * Reference count must be 2: one for "keepalive" (set at class |
@@ -345,7 +345,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
345 | } | 345 | } |
346 | } | 346 | } |
347 | 347 | ||
348 | static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl) | 348 | static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch, |
349 | unsigned long cl) | ||
349 | { | 350 | { |
350 | struct atm_qdisc_data *p = qdisc_priv(sch); | 351 | struct atm_qdisc_data *p = qdisc_priv(sch); |
351 | struct atm_flow_data *flow = (struct atm_flow_data *)cl; | 352 | struct atm_flow_data *flow = (struct atm_flow_data *)cl; |
@@ -369,11 +370,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
369 | flow = NULL; | 370 | flow = NULL; |
370 | if (TC_H_MAJ(skb->priority) != sch->handle || | 371 | if (TC_H_MAJ(skb->priority) != sch->handle || |
371 | !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { | 372 | !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { |
373 | struct tcf_proto *fl; | ||
374 | |||
372 | list_for_each_entry(flow, &p->flows, list) { | 375 | list_for_each_entry(flow, &p->flows, list) { |
373 | if (flow->filter_list) { | 376 | fl = rcu_dereference_bh(flow->filter_list); |
374 | result = tc_classify_compat(skb, | 377 | if (fl) { |
375 | flow->filter_list, | 378 | result = tc_classify_compat(skb, fl, &res); |
376 | &res); | ||
377 | if (result < 0) | 379 | if (result < 0) |
378 | continue; | 380 | continue; |
379 | flow = (struct atm_flow_data *)res.class; | 381 | flow = (struct atm_flow_data *)res.class; |
@@ -544,7 +546,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) | |||
544 | if (!p->link.q) | 546 | if (!p->link.q) |
545 | p->link.q = &noop_qdisc; | 547 | p->link.q = &noop_qdisc; |
546 | pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); | 548 | pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); |
547 | p->link.filter_list = NULL; | 549 | RCU_INIT_POINTER(p->link.filter_list, NULL); |
548 | p->link.vcc = NULL; | 550 | p->link.vcc = NULL; |
549 | p->link.sock = NULL; | 551 | p->link.sock = NULL; |
550 | p->link.classid = sch->handle; | 552 | p->link.classid = sch->handle; |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 762a04bb8f6d..a3244a800501 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -133,7 +133,7 @@ struct cbq_class { | |||
133 | struct gnet_stats_rate_est64 rate_est; | 133 | struct gnet_stats_rate_est64 rate_est; |
134 | struct tc_cbq_xstats xstats; | 134 | struct tc_cbq_xstats xstats; |
135 | 135 | ||
136 | struct tcf_proto *filter_list; | 136 | struct tcf_proto __rcu *filter_list; |
137 | 137 | ||
138 | int refcnt; | 138 | int refcnt; |
139 | int filters; | 139 | int filters; |
@@ -221,6 +221,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
221 | struct cbq_class **defmap; | 221 | struct cbq_class **defmap; |
222 | struct cbq_class *cl = NULL; | 222 | struct cbq_class *cl = NULL; |
223 | u32 prio = skb->priority; | 223 | u32 prio = skb->priority; |
224 | struct tcf_proto *fl; | ||
224 | struct tcf_result res; | 225 | struct tcf_result res; |
225 | 226 | ||
226 | /* | 227 | /* |
@@ -235,11 +236,12 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
235 | int result = 0; | 236 | int result = 0; |
236 | defmap = head->defaults; | 237 | defmap = head->defaults; |
237 | 238 | ||
239 | fl = rcu_dereference_bh(head->filter_list); | ||
238 | /* | 240 | /* |
239 | * Step 2+n. Apply classifier. | 241 | * Step 2+n. Apply classifier. |
240 | */ | 242 | */ |
241 | if (!head->filter_list || | 243 | result = tc_classify_compat(skb, fl, &res); |
242 | (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) | 244 | if (!fl || result < 0) |
243 | goto fallback; | 245 | goto fallback; |
244 | 246 | ||
245 | cl = (void *)res.class; | 247 | cl = (void *)res.class; |
@@ -1954,7 +1956,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) | |||
1954 | return 0; | 1956 | return 0; |
1955 | } | 1957 | } |
1956 | 1958 | ||
1957 | static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg) | 1959 | static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch, |
1960 | unsigned long arg) | ||
1958 | { | 1961 | { |
1959 | struct cbq_sched_data *q = qdisc_priv(sch); | 1962 | struct cbq_sched_data *q = qdisc_priv(sch); |
1960 | struct cbq_class *cl = (struct cbq_class *)arg; | 1963 | struct cbq_class *cl = (struct cbq_class *)arg; |
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ed30e436128b..74813e6b6ff6 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -57,7 +57,7 @@ struct choke_sched_data { | |||
57 | 57 | ||
58 | /* Variables */ | 58 | /* Variables */ |
59 | struct red_vars vars; | 59 | struct red_vars vars; |
60 | struct tcf_proto *filter_list; | 60 | struct tcf_proto __rcu *filter_list; |
61 | struct { | 61 | struct { |
62 | u32 prob_drop; /* Early probability drops */ | 62 | u32 prob_drop; /* Early probability drops */ |
63 | u32 prob_mark; /* Early probability marks */ | 63 | u32 prob_mark; /* Early probability marks */ |
@@ -193,9 +193,11 @@ static bool choke_classify(struct sk_buff *skb, | |||
193 | { | 193 | { |
194 | struct choke_sched_data *q = qdisc_priv(sch); | 194 | struct choke_sched_data *q = qdisc_priv(sch); |
195 | struct tcf_result res; | 195 | struct tcf_result res; |
196 | struct tcf_proto *fl; | ||
196 | int result; | 197 | int result; |
197 | 198 | ||
198 | result = tc_classify(skb, q->filter_list, &res); | 199 | fl = rcu_dereference_bh(q->filter_list); |
200 | result = tc_classify(skb, fl, &res); | ||
199 | if (result >= 0) { | 201 | if (result >= 0) { |
200 | #ifdef CONFIG_NET_CLS_ACT | 202 | #ifdef CONFIG_NET_CLS_ACT |
201 | switch (result) { | 203 | switch (result) { |
@@ -249,7 +251,7 @@ static bool choke_match_random(const struct choke_sched_data *q, | |||
249 | return false; | 251 | return false; |
250 | 252 | ||
251 | oskb = choke_peek_random(q, pidx); | 253 | oskb = choke_peek_random(q, pidx); |
252 | if (q->filter_list) | 254 | if (rcu_access_pointer(q->filter_list)) |
253 | return choke_get_classid(nskb) == choke_get_classid(oskb); | 255 | return choke_get_classid(nskb) == choke_get_classid(oskb); |
254 | 256 | ||
255 | return choke_match_flow(oskb, nskb); | 257 | return choke_match_flow(oskb, nskb); |
@@ -257,11 +259,11 @@ static bool choke_match_random(const struct choke_sched_data *q, | |||
257 | 259 | ||
258 | static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) | 260 | static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
259 | { | 261 | { |
262 | int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | ||
260 | struct choke_sched_data *q = qdisc_priv(sch); | 263 | struct choke_sched_data *q = qdisc_priv(sch); |
261 | const struct red_parms *p = &q->parms; | 264 | const struct red_parms *p = &q->parms; |
262 | int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | ||
263 | 265 | ||
264 | if (q->filter_list) { | 266 | if (rcu_access_pointer(q->filter_list)) { |
265 | /* If using external classifiers, get result and record it. */ | 267 | /* If using external classifiers, get result and record it. */ |
266 | if (!choke_classify(skb, sch, &ret)) | 268 | if (!choke_classify(skb, sch, &ret)) |
267 | goto other_drop; /* Packet was eaten by filter */ | 269 | goto other_drop; /* Packet was eaten by filter */ |
@@ -554,7 +556,8 @@ static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent, | |||
554 | return 0; | 556 | return 0; |
555 | } | 557 | } |
556 | 558 | ||
557 | static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl) | 559 | static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch, |
560 | unsigned long cl) | ||
558 | { | 561 | { |
559 | struct choke_sched_data *q = qdisc_priv(sch); | 562 | struct choke_sched_data *q = qdisc_priv(sch); |
560 | 563 | ||
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 7bbbfe112192..d8b5ccfd248a 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -35,7 +35,7 @@ struct drr_class { | |||
35 | 35 | ||
36 | struct drr_sched { | 36 | struct drr_sched { |
37 | struct list_head active; | 37 | struct list_head active; |
38 | struct tcf_proto *filter_list; | 38 | struct tcf_proto __rcu *filter_list; |
39 | struct Qdisc_class_hash clhash; | 39 | struct Qdisc_class_hash clhash; |
40 | }; | 40 | }; |
41 | 41 | ||
@@ -184,7 +184,8 @@ static void drr_put_class(struct Qdisc *sch, unsigned long arg) | |||
184 | drr_destroy_class(sch, cl); | 184 | drr_destroy_class(sch, cl); |
185 | } | 185 | } |
186 | 186 | ||
187 | static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl) | 187 | static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch, |
188 | unsigned long cl) | ||
188 | { | 189 | { |
189 | struct drr_sched *q = qdisc_priv(sch); | 190 | struct drr_sched *q = qdisc_priv(sch); |
190 | 191 | ||
@@ -319,6 +320,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
319 | struct drr_sched *q = qdisc_priv(sch); | 320 | struct drr_sched *q = qdisc_priv(sch); |
320 | struct drr_class *cl; | 321 | struct drr_class *cl; |
321 | struct tcf_result res; | 322 | struct tcf_result res; |
323 | struct tcf_proto *fl; | ||
322 | int result; | 324 | int result; |
323 | 325 | ||
324 | if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { | 326 | if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { |
@@ -328,7 +330,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
328 | } | 330 | } |
329 | 331 | ||
330 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 332 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
331 | result = tc_classify(skb, q->filter_list, &res); | 333 | fl = rcu_dereference_bh(q->filter_list); |
334 | result = tc_classify(skb, fl, &res); | ||
332 | if (result >= 0) { | 335 | if (result >= 0) { |
333 | #ifdef CONFIG_NET_CLS_ACT | 336 | #ifdef CONFIG_NET_CLS_ACT |
334 | switch (result) { | 337 | switch (result) { |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 49d6ef338b55..485e456c8139 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | struct dsmark_qdisc_data { | 38 | struct dsmark_qdisc_data { |
39 | struct Qdisc *q; | 39 | struct Qdisc *q; |
40 | struct tcf_proto *filter_list; | 40 | struct tcf_proto __rcu *filter_list; |
41 | u8 *mask; /* "owns" the array */ | 41 | u8 *mask; /* "owns" the array */ |
42 | u8 *value; | 42 | u8 *value; |
43 | u16 indices; | 43 | u16 indices; |
@@ -186,8 +186,8 @@ ignore: | |||
186 | } | 186 | } |
187 | } | 187 | } |
188 | 188 | ||
189 | static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch, | 189 | static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch, |
190 | unsigned long cl) | 190 | unsigned long cl) |
191 | { | 191 | { |
192 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 192 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
193 | return &p->filter_list; | 193 | return &p->filter_list; |
@@ -229,7 +229,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
229 | skb->tc_index = TC_H_MIN(skb->priority); | 229 | skb->tc_index = TC_H_MIN(skb->priority); |
230 | else { | 230 | else { |
231 | struct tcf_result res; | 231 | struct tcf_result res; |
232 | int result = tc_classify(skb, p->filter_list, &res); | 232 | struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); |
233 | int result = tc_classify(skb, fl, &res); | ||
233 | 234 | ||
234 | pr_debug("result %d class 0x%04x\n", result, res.classid); | 235 | pr_debug("result %d class 0x%04x\n", result, res.classid); |
235 | 236 | ||
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index cc56c8bb9bed..105cf5557630 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -52,7 +52,7 @@ struct fq_codel_flow { | |||
52 | }; /* please try to keep this structure <= 64 bytes */ | 52 | }; /* please try to keep this structure <= 64 bytes */ |
53 | 53 | ||
54 | struct fq_codel_sched_data { | 54 | struct fq_codel_sched_data { |
55 | struct tcf_proto *filter_list; /* optional external classifier */ | 55 | struct tcf_proto __rcu *filter_list; /* optional external classifier */ |
56 | struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ | 56 | struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ |
57 | u32 *backlogs; /* backlog table [flows_cnt] */ | 57 | u32 *backlogs; /* backlog table [flows_cnt] */ |
58 | u32 flows_cnt; /* number of flows */ | 58 | u32 flows_cnt; /* number of flows */ |
@@ -85,6 +85,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
85 | int *qerr) | 85 | int *qerr) |
86 | { | 86 | { |
87 | struct fq_codel_sched_data *q = qdisc_priv(sch); | 87 | struct fq_codel_sched_data *q = qdisc_priv(sch); |
88 | struct tcf_proto *filter; | ||
88 | struct tcf_result res; | 89 | struct tcf_result res; |
89 | int result; | 90 | int result; |
90 | 91 | ||
@@ -93,11 +94,12 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
93 | TC_H_MIN(skb->priority) <= q->flows_cnt) | 94 | TC_H_MIN(skb->priority) <= q->flows_cnt) |
94 | return TC_H_MIN(skb->priority); | 95 | return TC_H_MIN(skb->priority); |
95 | 96 | ||
96 | if (!q->filter_list) | 97 | filter = rcu_dereference(q->filter_list); |
98 | if (!filter) | ||
97 | return fq_codel_hash(q, skb) + 1; | 99 | return fq_codel_hash(q, skb) + 1; |
98 | 100 | ||
99 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 101 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
100 | result = tc_classify(skb, q->filter_list, &res); | 102 | result = tc_classify(skb, filter, &res); |
101 | if (result >= 0) { | 103 | if (result >= 0) { |
102 | #ifdef CONFIG_NET_CLS_ACT | 104 | #ifdef CONFIG_NET_CLS_ACT |
103 | switch (result) { | 105 | switch (result) { |
@@ -496,7 +498,8 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl) | |||
496 | { | 498 | { |
497 | } | 499 | } |
498 | 500 | ||
499 | static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl) | 501 | static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch, |
502 | unsigned long cl) | ||
500 | { | 503 | { |
501 | struct fq_codel_sched_data *q = qdisc_priv(sch); | 504 | struct fq_codel_sched_data *q = qdisc_priv(sch); |
502 | 505 | ||
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index ec8aeaac1dd7..04b0de4c68b5 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -116,7 +116,7 @@ struct hfsc_class { | |||
116 | struct gnet_stats_queue qstats; | 116 | struct gnet_stats_queue qstats; |
117 | struct gnet_stats_rate_est64 rate_est; | 117 | struct gnet_stats_rate_est64 rate_est; |
118 | unsigned int level; /* class level in hierarchy */ | 118 | unsigned int level; /* class level in hierarchy */ |
119 | struct tcf_proto *filter_list; /* filter list */ | 119 | struct tcf_proto __rcu *filter_list; /* filter list */ |
120 | unsigned int filter_cnt; /* filter count */ | 120 | unsigned int filter_cnt; /* filter count */ |
121 | 121 | ||
122 | struct hfsc_sched *sched; /* scheduler data */ | 122 | struct hfsc_sched *sched; /* scheduler data */ |
@@ -1161,7 +1161,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
1161 | 1161 | ||
1162 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 1162 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
1163 | head = &q->root; | 1163 | head = &q->root; |
1164 | tcf = q->root.filter_list; | 1164 | tcf = rcu_dereference_bh(q->root.filter_list); |
1165 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { | 1165 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { |
1166 | #ifdef CONFIG_NET_CLS_ACT | 1166 | #ifdef CONFIG_NET_CLS_ACT |
1167 | switch (result) { | 1167 | switch (result) { |
@@ -1185,7 +1185,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
1185 | return cl; /* hit leaf class */ | 1185 | return cl; /* hit leaf class */ |
1186 | 1186 | ||
1187 | /* apply inner filter chain */ | 1187 | /* apply inner filter chain */ |
1188 | tcf = cl->filter_list; | 1188 | tcf = rcu_dereference_bh(cl->filter_list); |
1189 | head = cl; | 1189 | head = cl; |
1190 | } | 1190 | } |
1191 | 1191 | ||
@@ -1285,7 +1285,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg) | |||
1285 | cl->filter_cnt--; | 1285 | cl->filter_cnt--; |
1286 | } | 1286 | } |
1287 | 1287 | ||
1288 | static struct tcf_proto ** | 1288 | static struct tcf_proto __rcu ** |
1289 | hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) | 1289 | hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) |
1290 | { | 1290 | { |
1291 | struct hfsc_sched *q = qdisc_priv(sch); | 1291 | struct hfsc_sched *q = qdisc_priv(sch); |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index aea942ce6008..6d16b9b81c28 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -103,7 +103,7 @@ struct htb_class { | |||
103 | u32 prio; /* these two are used only by leaves... */ | 103 | u32 prio; /* these two are used only by leaves... */ |
104 | int quantum; /* but stored for parent-to-leaf return */ | 104 | int quantum; /* but stored for parent-to-leaf return */ |
105 | 105 | ||
106 | struct tcf_proto *filter_list; /* class attached filters */ | 106 | struct tcf_proto __rcu *filter_list; /* class attached filters */ |
107 | int filter_cnt; | 107 | int filter_cnt; |
108 | int refcnt; /* usage count of this class */ | 108 | int refcnt; /* usage count of this class */ |
109 | 109 | ||
@@ -153,7 +153,7 @@ struct htb_sched { | |||
153 | int rate2quantum; /* quant = rate / rate2quantum */ | 153 | int rate2quantum; /* quant = rate / rate2quantum */ |
154 | 154 | ||
155 | /* filters for qdisc itself */ | 155 | /* filters for qdisc itself */ |
156 | struct tcf_proto *filter_list; | 156 | struct tcf_proto __rcu *filter_list; |
157 | 157 | ||
158 | #define HTB_WARN_TOOMANYEVENTS 0x1 | 158 | #define HTB_WARN_TOOMANYEVENTS 0x1 |
159 | unsigned int warned; /* only one warning */ | 159 | unsigned int warned; /* only one warning */ |
@@ -223,9 +223,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
223 | if (cl->level == 0) | 223 | if (cl->level == 0) |
224 | return cl; | 224 | return cl; |
225 | /* Start with inner filter chain if a non-leaf class is selected */ | 225 | /* Start with inner filter chain if a non-leaf class is selected */ |
226 | tcf = cl->filter_list; | 226 | tcf = rcu_dereference_bh(cl->filter_list); |
227 | } else { | 227 | } else { |
228 | tcf = q->filter_list; | 228 | tcf = rcu_dereference_bh(q->filter_list); |
229 | } | 229 | } |
230 | 230 | ||
231 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 231 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
@@ -251,7 +251,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
251 | return cl; /* we hit leaf; return it */ | 251 | return cl; /* we hit leaf; return it */ |
252 | 252 | ||
253 | /* we have got inner class; apply inner filter chain */ | 253 | /* we have got inner class; apply inner filter chain */ |
254 | tcf = cl->filter_list; | 254 | tcf = rcu_dereference_bh(cl->filter_list); |
255 | } | 255 | } |
256 | /* classification failed; try to use default class */ | 256 | /* classification failed; try to use default class */ |
257 | cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); | 257 | cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); |
@@ -1519,11 +1519,12 @@ failure: | |||
1519 | return err; | 1519 | return err; |
1520 | } | 1520 | } |
1521 | 1521 | ||
1522 | static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) | 1522 | static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch, |
1523 | unsigned long arg) | ||
1523 | { | 1524 | { |
1524 | struct htb_sched *q = qdisc_priv(sch); | 1525 | struct htb_sched *q = qdisc_priv(sch); |
1525 | struct htb_class *cl = (struct htb_class *)arg; | 1526 | struct htb_class *cl = (struct htb_class *)arg; |
1526 | struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list; | 1527 | struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list; |
1527 | 1528 | ||
1528 | return fl; | 1529 | return fl; |
1529 | } | 1530 | } |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 62871c14e1f9..b351125f3849 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | 18 | ||
19 | struct ingress_qdisc_data { | 19 | struct ingress_qdisc_data { |
20 | struct tcf_proto *filter_list; | 20 | struct tcf_proto __rcu *filter_list; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | /* ------------------------- Class/flow operations ------------------------- */ | 23 | /* ------------------------- Class/flow operations ------------------------- */ |
@@ -46,7 +46,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
46 | { | 46 | { |
47 | } | 47 | } |
48 | 48 | ||
49 | static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) | 49 | static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch, |
50 | unsigned long cl) | ||
50 | { | 51 | { |
51 | struct ingress_qdisc_data *p = qdisc_priv(sch); | 52 | struct ingress_qdisc_data *p = qdisc_priv(sch); |
52 | 53 | ||
@@ -59,9 +60,10 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
59 | { | 60 | { |
60 | struct ingress_qdisc_data *p = qdisc_priv(sch); | 61 | struct ingress_qdisc_data *p = qdisc_priv(sch); |
61 | struct tcf_result res; | 62 | struct tcf_result res; |
63 | struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); | ||
62 | int result; | 64 | int result; |
63 | 65 | ||
64 | result = tc_classify(skb, p->filter_list, &res); | 66 | result = tc_classify(skb, fl, &res); |
65 | 67 | ||
66 | qdisc_bstats_update(sch, skb); | 68 | qdisc_bstats_update(sch, skb); |
67 | switch (result) { | 69 | switch (result) { |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index afb050a735fa..c0466c1840f3 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -31,7 +31,7 @@ struct multiq_sched_data { | |||
31 | u16 bands; | 31 | u16 bands; |
32 | u16 max_bands; | 32 | u16 max_bands; |
33 | u16 curband; | 33 | u16 curband; |
34 | struct tcf_proto *filter_list; | 34 | struct tcf_proto __rcu *filter_list; |
35 | struct Qdisc **queues; | 35 | struct Qdisc **queues; |
36 | }; | 36 | }; |
37 | 37 | ||
@@ -42,10 +42,11 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
42 | struct multiq_sched_data *q = qdisc_priv(sch); | 42 | struct multiq_sched_data *q = qdisc_priv(sch); |
43 | u32 band; | 43 | u32 band; |
44 | struct tcf_result res; | 44 | struct tcf_result res; |
45 | struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); | ||
45 | int err; | 46 | int err; |
46 | 47 | ||
47 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 48 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
48 | err = tc_classify(skb, q->filter_list, &res); | 49 | err = tc_classify(skb, fl, &res); |
49 | #ifdef CONFIG_NET_CLS_ACT | 50 | #ifdef CONFIG_NET_CLS_ACT |
50 | switch (err) { | 51 | switch (err) { |
51 | case TC_ACT_STOLEN: | 52 | case TC_ACT_STOLEN: |
@@ -388,7 +389,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
388 | } | 389 | } |
389 | } | 390 | } |
390 | 391 | ||
391 | static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl) | 392 | static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch, |
393 | unsigned long cl) | ||
392 | { | 394 | { |
393 | struct multiq_sched_data *q = qdisc_priv(sch); | 395 | struct multiq_sched_data *q = qdisc_priv(sch); |
394 | 396 | ||
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 79359b69ad8d..03ef99e52a5c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | struct prio_sched_data { | 25 | struct prio_sched_data { |
26 | int bands; | 26 | int bands; |
27 | struct tcf_proto *filter_list; | 27 | struct tcf_proto __rcu *filter_list; |
28 | u8 prio2band[TC_PRIO_MAX+1]; | 28 | u8 prio2band[TC_PRIO_MAX+1]; |
29 | struct Qdisc *queues[TCQ_PRIO_BANDS]; | 29 | struct Qdisc *queues[TCQ_PRIO_BANDS]; |
30 | }; | 30 | }; |
@@ -36,11 +36,13 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
36 | struct prio_sched_data *q = qdisc_priv(sch); | 36 | struct prio_sched_data *q = qdisc_priv(sch); |
37 | u32 band = skb->priority; | 37 | u32 band = skb->priority; |
38 | struct tcf_result res; | 38 | struct tcf_result res; |
39 | struct tcf_proto *fl; | ||
39 | int err; | 40 | int err; |
40 | 41 | ||
41 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 42 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
42 | if (TC_H_MAJ(skb->priority) != sch->handle) { | 43 | if (TC_H_MAJ(skb->priority) != sch->handle) { |
43 | err = tc_classify(skb, q->filter_list, &res); | 44 | fl = rcu_dereference_bh(q->filter_list); |
45 | err = tc_classify(skb, fl, &res); | ||
44 | #ifdef CONFIG_NET_CLS_ACT | 46 | #ifdef CONFIG_NET_CLS_ACT |
45 | switch (err) { | 47 | switch (err) { |
46 | case TC_ACT_STOLEN: | 48 | case TC_ACT_STOLEN: |
@@ -50,7 +52,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
50 | return NULL; | 52 | return NULL; |
51 | } | 53 | } |
52 | #endif | 54 | #endif |
53 | if (!q->filter_list || err < 0) { | 55 | if (!fl || err < 0) { |
54 | if (TC_H_MAJ(band)) | 56 | if (TC_H_MAJ(band)) |
55 | band = 0; | 57 | band = 0; |
56 | return q->queues[q->prio2band[band & TC_PRIO_MAX]]; | 58 | return q->queues[q->prio2band[band & TC_PRIO_MAX]]; |
@@ -351,7 +353,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
351 | } | 353 | } |
352 | } | 354 | } |
353 | 355 | ||
354 | static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) | 356 | static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch, |
357 | unsigned long cl) | ||
355 | { | 358 | { |
356 | struct prio_sched_data *q = qdisc_priv(sch); | 359 | struct prio_sched_data *q = qdisc_priv(sch); |
357 | 360 | ||
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8056fb4e618a..602ea01a4ddd 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -181,7 +181,7 @@ struct qfq_group { | |||
181 | }; | 181 | }; |
182 | 182 | ||
183 | struct qfq_sched { | 183 | struct qfq_sched { |
184 | struct tcf_proto *filter_list; | 184 | struct tcf_proto __rcu *filter_list; |
185 | struct Qdisc_class_hash clhash; | 185 | struct Qdisc_class_hash clhash; |
186 | 186 | ||
187 | u64 oldV, V; /* Precise virtual times. */ | 187 | u64 oldV, V; /* Precise virtual times. */ |
@@ -576,7 +576,8 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg) | |||
576 | qfq_destroy_class(sch, cl); | 576 | qfq_destroy_class(sch, cl); |
577 | } | 577 | } |
578 | 578 | ||
579 | static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl) | 579 | static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch, |
580 | unsigned long cl) | ||
580 | { | 581 | { |
581 | struct qfq_sched *q = qdisc_priv(sch); | 582 | struct qfq_sched *q = qdisc_priv(sch); |
582 | 583 | ||
@@ -704,6 +705,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
704 | struct qfq_sched *q = qdisc_priv(sch); | 705 | struct qfq_sched *q = qdisc_priv(sch); |
705 | struct qfq_class *cl; | 706 | struct qfq_class *cl; |
706 | struct tcf_result res; | 707 | struct tcf_result res; |
708 | struct tcf_proto *fl; | ||
707 | int result; | 709 | int result; |
708 | 710 | ||
709 | if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { | 711 | if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { |
@@ -714,7 +716,8 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
714 | } | 716 | } |
715 | 717 | ||
716 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 718 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
717 | result = tc_classify(skb, q->filter_list, &res); | 719 | fl = rcu_dereference_bh(q->filter_list); |
720 | result = tc_classify(skb, fl, &res); | ||
718 | if (result >= 0) { | 721 | if (result >= 0) { |
719 | #ifdef CONFIG_NET_CLS_ACT | 722 | #ifdef CONFIG_NET_CLS_ACT |
720 | switch (result) { | 723 | switch (result) { |
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 9b0f7093d970..1562fb2b3f46 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -55,7 +55,7 @@ struct sfb_bins { | |||
55 | 55 | ||
56 | struct sfb_sched_data { | 56 | struct sfb_sched_data { |
57 | struct Qdisc *qdisc; | 57 | struct Qdisc *qdisc; |
58 | struct tcf_proto *filter_list; | 58 | struct tcf_proto __rcu *filter_list; |
59 | unsigned long rehash_interval; | 59 | unsigned long rehash_interval; |
60 | unsigned long warmup_time; /* double buffering warmup time in jiffies */ | 60 | unsigned long warmup_time; /* double buffering warmup time in jiffies */ |
61 | u32 max; | 61 | u32 max; |
@@ -253,13 +253,13 @@ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q) | |||
253 | return false; | 253 | return false; |
254 | } | 254 | } |
255 | 255 | ||
256 | static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q, | 256 | static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, |
257 | int *qerr, u32 *salt) | 257 | int *qerr, u32 *salt) |
258 | { | 258 | { |
259 | struct tcf_result res; | 259 | struct tcf_result res; |
260 | int result; | 260 | int result; |
261 | 261 | ||
262 | result = tc_classify(skb, q->filter_list, &res); | 262 | result = tc_classify(skb, fl, &res); |
263 | if (result >= 0) { | 263 | if (result >= 0) { |
264 | #ifdef CONFIG_NET_CLS_ACT | 264 | #ifdef CONFIG_NET_CLS_ACT |
265 | switch (result) { | 265 | switch (result) { |
@@ -281,6 +281,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
281 | 281 | ||
282 | struct sfb_sched_data *q = qdisc_priv(sch); | 282 | struct sfb_sched_data *q = qdisc_priv(sch); |
283 | struct Qdisc *child = q->qdisc; | 283 | struct Qdisc *child = q->qdisc; |
284 | struct tcf_proto *fl; | ||
284 | int i; | 285 | int i; |
285 | u32 p_min = ~0; | 286 | u32 p_min = ~0; |
286 | u32 minqlen = ~0; | 287 | u32 minqlen = ~0; |
@@ -306,9 +307,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
306 | } | 307 | } |
307 | } | 308 | } |
308 | 309 | ||
309 | if (q->filter_list) { | 310 | fl = rcu_dereference_bh(q->filter_list); |
311 | if (fl) { | ||
310 | /* If using external classifiers, get result and record it. */ | 312 | /* If using external classifiers, get result and record it. */ |
311 | if (!sfb_classify(skb, q, &ret, &salt)) | 313 | if (!sfb_classify(skb, fl, &ret, &salt)) |
312 | goto other_drop; | 314 | goto other_drop; |
313 | keys.src = salt; | 315 | keys.src = salt; |
314 | keys.dst = 0; | 316 | keys.dst = 0; |
@@ -660,7 +662,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
660 | } | 662 | } |
661 | } | 663 | } |
662 | 664 | ||
663 | static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl) | 665 | static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch, |
666 | unsigned long cl) | ||
664 | { | 667 | { |
665 | struct sfb_sched_data *q = qdisc_priv(sch); | 668 | struct sfb_sched_data *q = qdisc_priv(sch); |
666 | 669 | ||
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 211db9017c35..80c36bd54abc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -125,7 +125,7 @@ struct sfq_sched_data { | |||
125 | u8 cur_depth; /* depth of longest slot */ | 125 | u8 cur_depth; /* depth of longest slot */ |
126 | u8 flags; | 126 | u8 flags; |
127 | unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ | 127 | unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ |
128 | struct tcf_proto *filter_list; | 128 | struct tcf_proto __rcu *filter_list; |
129 | sfq_index *ht; /* Hash table ('divisor' slots) */ | 129 | sfq_index *ht; /* Hash table ('divisor' slots) */ |
130 | struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ | 130 | struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ |
131 | 131 | ||
@@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
187 | { | 187 | { |
188 | struct sfq_sched_data *q = qdisc_priv(sch); | 188 | struct sfq_sched_data *q = qdisc_priv(sch); |
189 | struct tcf_result res; | 189 | struct tcf_result res; |
190 | struct tcf_proto *fl; | ||
190 | int result; | 191 | int result; |
191 | 192 | ||
192 | if (TC_H_MAJ(skb->priority) == sch->handle && | 193 | if (TC_H_MAJ(skb->priority) == sch->handle && |
@@ -194,13 +195,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
194 | TC_H_MIN(skb->priority) <= q->divisor) | 195 | TC_H_MIN(skb->priority) <= q->divisor) |
195 | return TC_H_MIN(skb->priority); | 196 | return TC_H_MIN(skb->priority); |
196 | 197 | ||
197 | if (!q->filter_list) { | 198 | fl = rcu_dereference_bh(q->filter_list); |
199 | if (!fl) { | ||
198 | skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys); | 200 | skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys); |
199 | return sfq_hash(q, skb) + 1; | 201 | return sfq_hash(q, skb) + 1; |
200 | } | 202 | } |
201 | 203 | ||
202 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 204 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
203 | result = tc_classify(skb, q->filter_list, &res); | 205 | result = tc_classify(skb, fl, &res); |
204 | if (result >= 0) { | 206 | if (result >= 0) { |
205 | #ifdef CONFIG_NET_CLS_ACT | 207 | #ifdef CONFIG_NET_CLS_ACT |
206 | switch (result) { | 208 | switch (result) { |
@@ -836,7 +838,8 @@ static void sfq_put(struct Qdisc *q, unsigned long cl) | |||
836 | { | 838 | { |
837 | } | 839 | } |
838 | 840 | ||
839 | static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) | 841 | static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch, |
842 | unsigned long cl) | ||
840 | { | 843 | { |
841 | struct sfq_sched_data *q = qdisc_priv(sch); | 844 | struct sfq_sched_data *q = qdisc_priv(sch); |
842 | 845 | ||