author	Eric Dumazet <edumazet@google.com>	2014-07-24 00:36:50 -0400
committer	Pablo Neira Ayuso <pablo@netfilter.org>	2014-07-24 07:07:25 -0400
commit	7bd8490eef9776ced7632345df5133384b6be0fe (patch)
tree	cb5130cc165142998ac5729e8de9b26c622bb7e1 /net
parent	5b96af7713546fca812682fed13cfad26d69fed7 (diff)
netfilter: xt_hashlimit: perform garbage collection from process context
xt_hashlimit cannot be used with large hash tables, because the garbage
collector runs from a timer. If the table is really big, it is possible
to hold the CPU for more than 500 msec, which is unacceptable.

Switch to a work queue, and use proper scheduling points to remove
latency spikes.

Later, we could also switch to a smoother garbage collection done at
lookup time, one bucket at a time...

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Cc: Patrick McHardy <kaber@trash.net>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
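[Editor's note] For readers less familiar with the pattern this patch adopts, the sketch below shows the same timer-to-delayed-work conversion in isolation: a periodic garbage collector that re-arms itself from process context instead of a timer callback. It is a minimal illustration, not part of the patch; struct demo_table, demo_gc() and the DEMO_* constants are invented for the example, while the workqueue calls (INIT_DEFERRABLE_WORK, queue_delayed_work on system_power_efficient_wq, cancel_delayed_work_sync) are the same kernel APIs the patch uses.

	/*
	 * Hypothetical, self-contained sketch of the timer -> delayed
	 * work conversion. All demo_* names are made up for illustration.
	 */
	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/spinlock.h>
	#include <linux/jiffies.h>
	#include <linux/list.h>
	#include <linux/sched.h>

	#define DEMO_GC_INTERVAL_MS	1000	/* assumed interval */
	#define DEMO_TABLE_SIZE		1024	/* assumed bucket count */

	struct demo_table {
		spinlock_t		lock;
		struct hlist_head	hash[DEMO_TABLE_SIZE];
		struct delayed_work	gc_work; /* was: struct timer_list */
	};

	static struct demo_table demo;

	static void demo_gc(struct work_struct *work)
	{
		struct demo_table *t = container_of(work, struct demo_table,
						    gc_work.work);
		unsigned int i;

		for (i = 0; i < DEMO_TABLE_SIZE; i++) {
			/* Lock one bucket at a time, not the whole scan. */
			spin_lock_bh(&t->lock);
			/* ... expire stale entries on t->hash[i] here ... */
			spin_unlock_bh(&t->lock);
			cond_resched();	/* legal here: process context */
		}

		/* Re-arm, as the old self-rearming timer did. */
		queue_delayed_work(system_power_efficient_wq, &t->gc_work,
				   msecs_to_jiffies(DEMO_GC_INTERVAL_MS));
	}

	static int __init demo_init(void)
	{
		unsigned int i;

		spin_lock_init(&demo.lock);
		for (i = 0; i < DEMO_TABLE_SIZE; i++)
			INIT_HLIST_HEAD(&demo.hash[i]);

		INIT_DEFERRABLE_WORK(&demo.gc_work, demo_gc);
		queue_delayed_work(system_power_efficient_wq, &demo.gc_work,
				   msecs_to_jiffies(DEMO_GC_INTERVAL_MS));
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* Waits for a running pass, like del_timer_sync() did. */
		cancel_delayed_work_sync(&demo.gc_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Because work items run in process context, the per-bucket cond_resched() is what removes the multi-hundred-millisecond latency spikes a long timer callback would cause; using a deferrable work item on the power-efficient workqueue additionally lets an idle CPU delay the wakeup.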
Diffstat (limited to 'net')
-rw-r--r--	net/netfilter/xt_hashlimit.c	31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a3910fc2122b..47dc6836830a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
 	spinlock_t lock;		/* lock for list_head */
 	u_int32_t rnd;			/* random seed for hash */
 	unsigned int count;		/* number entries in table */
-	struct timer_list timer;	/* timer for gc */
+	struct delayed_work gc_work;
 
 	/* seq_file stuff */
 	struct proc_dir_entry *pde;
@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
 	call_rcu_bh(&ent->rcu, dsthash_free_rcu);
 	ht->count--;
 }
-static void htable_gc(unsigned long htlong);
+static void htable_gc(struct work_struct *work);
 
 static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
 			 u_int8_t family)
@@ -273,9 +273,9 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
 	}
 	hinfo->net = net;
 
-	setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
-	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
-	add_timer(&hinfo->timer);
+	INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
+	queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
+			   msecs_to_jiffies(hinfo->cfg.gc_interval));
 
 	hlist_add_head(&hinfo->node, &hashlimit_net->htables);
 
@@ -300,29 +300,30 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
 {
 	unsigned int i;
 
-	/* lock hash table and iterate over it */
-	spin_lock_bh(&ht->lock);
 	for (i = 0; i < ht->cfg.size; i++) {
 		struct dsthash_ent *dh;
 		struct hlist_node *n;
+
+		spin_lock_bh(&ht->lock);
 		hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
 			if ((*select)(ht, dh))
 				dsthash_free(ht, dh);
 		}
+		spin_unlock_bh(&ht->lock);
+		cond_resched();
 	}
-	spin_unlock_bh(&ht->lock);
 }
 
-/* hash table garbage collector, run by timer */
-static void htable_gc(unsigned long htlong)
+static void htable_gc(struct work_struct *work)
 {
-	struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
+	struct xt_hashlimit_htable *ht;
+
+	ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
 
 	htable_selective_cleanup(ht, select_gc);
 
-	/* re-add the timer accordingly */
-	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
-	add_timer(&ht->timer);
+	queue_delayed_work(system_power_efficient_wq,
+			   &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
 }
 
 static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
 
 static void htable_destroy(struct xt_hashlimit_htable *hinfo)
 {
-	del_timer_sync(&hinfo->timer);
+	cancel_delayed_work_sync(&hinfo->gc_work);
 	htable_remove_proc_entry(hinfo);
 	htable_selective_cleanup(hinfo, select_all);
 	kfree(hinfo->name);