diff options
author | Cong Wang <xiyou.wangcong@gmail.com> | 2018-05-23 18:26:53 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-05-24 22:56:15 -0400 |
commit | aaa908ffbee18a65529b716efb346a626e81559a (patch) | |
tree | 0ff6902780f9a964ac1b8fe8ea6a2bb7d7898cc7 /net/sched/cls_route.c | |
parent | 1bb58d2d3cbebeb2ee38d11e8fa86b06117f5f75 (diff) |
net_sched: switch to rcu_work
Commit 05f0fe6b74db ("RCU, workqueue: Implement rcu_work") introduces
new APIs for dispatching work in an RCU callback. Now we can just
switch to the new APIs for tc filters. This could get rid of a lot
of code.
Cc: Tejun Heo <tj@kernel.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_route.c')
-rw-r--r-- | net/sched/cls_route.c | 23 |
1 file changed, 9 insertions, 14 deletions
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 21a03a8ee029..0404aa5fa7cb 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c | |||
@@ -57,10 +57,7 @@ struct route4_filter { | |||
57 | u32 handle; | 57 | u32 handle; |
58 | struct route4_bucket *bkt; | 58 | struct route4_bucket *bkt; |
59 | struct tcf_proto *tp; | 59 | struct tcf_proto *tp; |
60 | union { | 60 | struct rcu_work rwork; |
61 | struct work_struct work; | ||
62 | struct rcu_head rcu; | ||
63 | }; | ||
64 | }; | 61 | }; |
65 | 62 | ||
66 | #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) | 63 | #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) |
@@ -266,19 +263,17 @@ static void __route4_delete_filter(struct route4_filter *f) | |||
266 | 263 | ||
267 | static void route4_delete_filter_work(struct work_struct *work) | 264 | static void route4_delete_filter_work(struct work_struct *work) |
268 | { | 265 | { |
269 | struct route4_filter *f = container_of(work, struct route4_filter, work); | 266 | struct route4_filter *f = container_of(to_rcu_work(work), |
270 | 267 | struct route4_filter, | |
268 | rwork); | ||
271 | rtnl_lock(); | 269 | rtnl_lock(); |
272 | __route4_delete_filter(f); | 270 | __route4_delete_filter(f); |
273 | rtnl_unlock(); | 271 | rtnl_unlock(); |
274 | } | 272 | } |
275 | 273 | ||
276 | static void route4_delete_filter(struct rcu_head *head) | 274 | static void route4_queue_work(struct route4_filter *f) |
277 | { | 275 | { |
278 | struct route4_filter *f = container_of(head, struct route4_filter, rcu); | 276 | tcf_queue_work(&f->rwork, route4_delete_filter_work); |
279 | |||
280 | INIT_WORK(&f->work, route4_delete_filter_work); | ||
281 | tcf_queue_work(&f->work); | ||
282 | } | 277 | } |
283 | 278 | ||
284 | static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) | 279 | static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) |
@@ -304,7 +299,7 @@ static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) | |||
304 | RCU_INIT_POINTER(b->ht[h2], next); | 299 | RCU_INIT_POINTER(b->ht[h2], next); |
305 | tcf_unbind_filter(tp, &f->res); | 300 | tcf_unbind_filter(tp, &f->res); |
306 | if (tcf_exts_get_net(&f->exts)) | 301 | if (tcf_exts_get_net(&f->exts)) |
307 | call_rcu(&f->rcu, route4_delete_filter); | 302 | route4_queue_work(f); |
308 | else | 303 | else |
309 | __route4_delete_filter(f); | 304 | __route4_delete_filter(f); |
310 | } | 305 | } |
@@ -349,7 +344,7 @@ static int route4_delete(struct tcf_proto *tp, void *arg, bool *last, | |||
349 | /* Delete it */ | 344 | /* Delete it */ |
350 | tcf_unbind_filter(tp, &f->res); | 345 | tcf_unbind_filter(tp, &f->res); |
351 | tcf_exts_get_net(&f->exts); | 346 | tcf_exts_get_net(&f->exts); |
352 | call_rcu(&f->rcu, route4_delete_filter); | 347 | tcf_queue_work(&f->rwork, route4_delete_filter_work); |
353 | 348 | ||
354 | /* Strip RTNL protected tree */ | 349 | /* Strip RTNL protected tree */ |
355 | for (i = 0; i <= 32; i++) { | 350 | for (i = 0; i <= 32; i++) { |
@@ -554,7 +549,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, | |||
554 | if (fold) { | 549 | if (fold) { |
555 | tcf_unbind_filter(tp, &fold->res); | 550 | tcf_unbind_filter(tp, &fold->res); |
556 | tcf_exts_get_net(&fold->exts); | 551 | tcf_exts_get_net(&fold->exts); |
557 | call_rcu(&fold->rcu, route4_delete_filter); | 552 | tcf_queue_work(&fold->rwork, route4_delete_filter_work); |
558 | } | 553 | } |
559 | return 0; | 554 | return 0; |
560 | 555 | ||