Diffstat (limited to 'net/sched/cls_api.c')
-rw-r--r--  net/sched/cls_api.c | 69
1 files changed, 52 insertions, 17 deletions
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0b2219adf520..231181c602ed 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -77,6 +77,8 @@ out:
 }
 EXPORT_SYMBOL(register_tcf_proto_ops);
 
+static struct workqueue_struct *tc_filter_wq;
+
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 {
 	struct tcf_proto_ops *t;
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 	 * tcf_proto_ops's destroy() handler.
 	 */
 	rcu_barrier();
+	flush_workqueue(tc_filter_wq);
 
 	write_lock(&cls_mod_lock);
 	list_for_each_entry(t, &tcf_proto_base, head) {
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 }
 EXPORT_SYMBOL(unregister_tcf_proto_ops);
 
+bool tcf_queue_work(struct work_struct *work)
+{
+	return queue_work(tc_filter_wq, work);
+}
+EXPORT_SYMBOL(tcf_queue_work);
+
 /* Select new prio value from the range, managed by kernel. */
 
 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
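The new tcf_queue_work() helper above exists so that tc filters can bounce their teardown out of an RCU callback (where sleeping and taking the RTNL lock are not allowed) into process context on tc_filter_wq. A minimal sketch of such a caller follows; the names struct foo_filter and foo_delete_filter_*() are illustrative only and not part of this patch:

/* Sketch only: a hypothetical classifier deferring filter teardown.
 * The RCU callback merely queues a work item via tcf_queue_work();
 * the work function then runs in process context, where it may take
 * rtnl_lock() before destroying actions.
 */
struct foo_filter {
	struct tcf_exts exts;
	struct rcu_head rcu;
	struct work_struct work;
};

static void foo_delete_filter_work(struct work_struct *work)
{
	struct foo_filter *f = container_of(work, struct foo_filter, work);

	rtnl_lock();
	tcf_exts_destroy(&f->exts);	/* satisfies the ASSERT_RTNL() added below */
	rtnl_unlock();
	kfree(f);
}

static void foo_delete_filter_rcu(struct rcu_head *head)
{
	struct foo_filter *f = container_of(head, struct foo_filter, rcu);

	INIT_WORK(&f->work, foo_delete_filter_work);
	tcf_queue_work(&f->work);
}

/* In the classifier's delete path, after unlinking the filter:
 *	call_rcu(&f->rcu, foo_delete_filter_rcu);
 */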
@@ -266,23 +275,30 @@ err_chain_create:
 }
 EXPORT_SYMBOL(tcf_block_get);
 
-void tcf_block_put(struct tcf_block *block)
+static void tcf_block_put_final(struct work_struct *work)
 {
+	struct tcf_block *block = container_of(work, struct tcf_block, work);
 	struct tcf_chain *chain, *tmp;
 
-	if (!block)
-		return;
-
-	/* XXX: Standalone actions are not allowed to jump to any chain, and
-	 * bound actions should be all removed after flushing. However,
-	 * filters are destroyed in RCU callbacks, we have to hold the chains
-	 * first, otherwise we would always race with RCU callbacks on this list
-	 * without proper locking.
-	 */
+	/* At this point, all the chains should have refcnt == 1. */
+	rtnl_lock();
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+		tcf_chain_put(chain);
+	rtnl_unlock();
+	kfree(block);
+}
 
-	/* Wait for existing RCU callbacks to cool down. */
-	rcu_barrier();
+/* XXX: Standalone actions are not allowed to jump to any chain, and bound
+ * actions should be all removed after flushing. However, filters are destroyed
+ * in RCU callbacks, we have to hold the chains first, otherwise we would
+ * always race with RCU callbacks on this list without proper locking.
+ */
+static void tcf_block_put_deferred(struct work_struct *work)
+{
+	struct tcf_block *block = container_of(work, struct tcf_block, work);
+	struct tcf_chain *chain;
 
+	rtnl_lock();
 	/* Hold a refcnt for all chains, except 0, in case they are gone. */
 	list_for_each_entry(chain, &block->chain_list, list)
 		if (chain->index)
@@ -292,13 +308,27 @@ void tcf_block_put(struct tcf_block *block)
 	list_for_each_entry(chain, &block->chain_list, list)
 		tcf_chain_flush(chain);
 
-	/* Wait for RCU callbacks to release the reference count. */
+	INIT_WORK(&block->work, tcf_block_put_final);
+	/* Wait for RCU callbacks to release the reference count and make
+	 * sure their works have been queued before this.
+	 */
 	rcu_barrier();
+	tcf_queue_work(&block->work);
+	rtnl_unlock();
+}
 
-	/* At this point, all the chains should have refcnt == 1. */
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-		tcf_chain_put(chain);
-	kfree(block);
+void tcf_block_put(struct tcf_block *block)
+{
+	if (!block)
+		return;
+
+	INIT_WORK(&block->work, tcf_block_put_deferred);
+	/* Wait for existing RCU callbacks to cool down, make sure their works
+	 * have been queued before this. We can not flush pending works here
+	 * because we are holding the RTNL lock.
+	 */
+	rcu_barrier();
+	tcf_queue_work(&block->work);
 }
 EXPORT_SYMBOL(tcf_block_put);
 
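Note that tcf_block_put_final() and tcf_block_put_deferred() recover the block with container_of(work, struct tcf_block, work), which implies a work member added to struct tcf_block, and tcf_queue_work() must be declared for classifiers to call; both sit in headers outside this file-limited view. A rough sketch of the presumed companion declarations (header paths and surrounding members are assumptions, not shown in this diff):

/* Presumed companion changes, not part of net/sched/cls_api.c: */

/* include/net/sch_generic.h */
struct tcf_block {
	/* ... existing members such as chain_list ... */
	struct work_struct work;	/* queued via tcf_queue_work() */
};

/* include/net/pkt_cls.h */
bool tcf_queue_work(struct work_struct *work);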
@@ -879,6 +909,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
 #ifdef CONFIG_NET_CLS_ACT
 	LIST_HEAD(actions);
 
+	ASSERT_RTNL();
 	tcf_exts_to_list(exts, &actions);
 	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
 	kfree(exts->actions);
@@ -1030,6 +1061,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);
 
 static int __init tc_filter_init(void)
 {
+	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
+	if (!tc_filter_wq)
+		return -ENOMEM;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,