Diffstat (limited to 'net/core/flow.c')
 net/core/flow.c | 127 +++++++++++++-----------------
 1 file changed, 57 insertions(+), 70 deletions(-)
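
Note: this diffstat view is limited to net/core/flow.c, so the matching header changes are not shown. The patch assumes the former file-scope globals now live inside struct netns_xfrm (and that struct flow_cache itself moves to a shared header so it can be embedded there). A rough sketch, inferred only from the net->xfrm.* and xfrm->* references in the diff below; the real declarations belong in include/net/netns/xfrm.h and may differ:

	/*
	 * Sketch only: fields inferred from this diff, not copied from
	 * the actual header.
	 */
	struct netns_xfrm {
		/* ... existing xfrm per-net state ... */

		/* flow cache state, formerly globals in net/core/flow.c */
		struct flow_cache	flow_cache_global;
		struct kmem_cache	*flow_cachep;
		atomic_t		flow_cache_genid;
		spinlock_t		flow_cache_gc_lock;
		struct list_head	flow_cache_gc_list;
		struct work_struct	flow_cache_gc_work;
		struct work_struct	flow_cache_flush_work;
		struct mutex		flow_flush_sem;
	};
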
diff --git a/net/core/flow.c b/net/core/flow.c
index dfa602ceb8cd..344a184011fd 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -24,6 +24,7 @@
 #include <net/flow.h>
 #include <linux/atomic.h>
 #include <linux/security.h>
+#include <net/net_namespace.h>
 
 struct flow_cache_entry {
 	union {
@@ -38,37 +39,12 @@ struct flow_cache_entry {
 	struct flow_cache_object *object;
 };
 
-struct flow_cache_percpu {
-	struct hlist_head		*hash_table;
-	int				hash_count;
-	u32				hash_rnd;
-	int				hash_rnd_recalc;
-	struct tasklet_struct		flush_tasklet;
-};
-
 struct flow_flush_info {
 	struct flow_cache		*cache;
 	atomic_t			cpuleft;
 	struct completion		completion;
 };
 
-struct flow_cache {
-	u32				hash_shift;
-	struct flow_cache_percpu __percpu *percpu;
-	struct notifier_block		hotcpu_notifier;
-	int				low_watermark;
-	int				high_watermark;
-	struct timer_list		rnd_timer;
-};
-
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-EXPORT_SYMBOL(flow_cache_genid);
-static struct flow_cache flow_cache_global;
-static struct kmem_cache *flow_cachep __read_mostly;
-
-static DEFINE_SPINLOCK(flow_cache_gc_lock);
-static LIST_HEAD(flow_cache_gc_list);
-
 #define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
 #define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
 
@@ -84,46 +60,50 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 	add_timer(&fc->rnd_timer);
 }
 
-static int flow_entry_valid(struct flow_cache_entry *fle)
+static int flow_entry_valid(struct flow_cache_entry *fle,
+				struct netns_xfrm *xfrm)
 {
-	if (atomic_read(&flow_cache_genid) != fle->genid)
+	if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
 		return 0;
 	if (fle->object && !fle->object->ops->check(fle->object))
 		return 0;
 	return 1;
 }
 
-static void flow_entry_kill(struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache_entry *fle,
+				struct netns_xfrm *xfrm)
 {
 	if (fle->object)
 		fle->object->ops->delete(fle->object);
-	kmem_cache_free(flow_cachep, fle);
+	kmem_cache_free(xfrm->flow_cachep, fle);
 }
 
 static void flow_cache_gc_task(struct work_struct *work)
 {
 	struct list_head gc_list;
 	struct flow_cache_entry *fce, *n;
+	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+						flow_cache_gc_work);
 
 	INIT_LIST_HEAD(&gc_list);
-	spin_lock_bh(&flow_cache_gc_lock);
-	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
-	spin_unlock_bh(&flow_cache_gc_lock);
+	spin_lock_bh(&xfrm->flow_cache_gc_lock);
+	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
+	spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
 	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
-		flow_entry_kill(fce);
+		flow_entry_kill(fce, xfrm);
 }
-static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
-				     int deleted, struct list_head *gc_list)
+				     int deleted, struct list_head *gc_list,
+				     struct netns_xfrm *xfrm)
 {
 	if (deleted) {
 		fcp->hash_count -= deleted;
-		spin_lock_bh(&flow_cache_gc_lock);
-		list_splice_tail(gc_list, &flow_cache_gc_list);
-		spin_unlock_bh(&flow_cache_gc_lock);
-		schedule_work(&flow_cache_gc_work);
+		spin_lock_bh(&xfrm->flow_cache_gc_lock);
+		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
+		spin_unlock_bh(&xfrm->flow_cache_gc_lock);
+		schedule_work(&xfrm->flow_cache_gc_work);
 	}
 }
 
@@ -135,6 +115,8 @@ static void __flow_cache_shrink(struct flow_cache *fc,
 	struct hlist_node *tmp;
 	LIST_HEAD(gc_list);
 	int i, deleted = 0;
+	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
+						flow_cache_global);
 
 	for (i = 0; i < flow_cache_hash_size(fc); i++) {
 		int saved = 0;
@@ -142,7 +124,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
 		hlist_for_each_entry_safe(fle, tmp,
 					  &fcp->hash_table[i], u.hlist) {
 			if (saved < shrink_to &&
-			    flow_entry_valid(fle)) {
+			    flow_entry_valid(fle, xfrm)) {
 				saved++;
 			} else {
 				deleted++;
@@ -152,7 +134,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
 		}
 	}
 
-	flow_cache_queue_garbage(fcp, deleted, &gc_list);
+	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
 }
 
 static void flow_cache_shrink(struct flow_cache *fc,
@@ -208,7 +190,7 @@ struct flow_cache_object *
 flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 		  flow_resolve_t resolver, void *ctx)
 {
-	struct flow_cache *fc = &flow_cache_global;
+	struct flow_cache *fc = &net->xfrm.flow_cache_global;
 	struct flow_cache_percpu *fcp;
 	struct flow_cache_entry *fle, *tfle;
 	struct flow_cache_object *flo;
@@ -248,7 +230,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
 
-		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
+		fle = kmem_cache_alloc(net->xfrm.flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->net = net;
 			fle->family = family;
@@ -258,7 +240,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
 			fcp->hash_count++;
 		}
-	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+	} else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
 		flo = fle->object;
 		if (!flo)
 			goto ret_object;
@@ -279,7 +261,7 @@ nocache:
 	}
 	flo = resolver(net, key, family, dir, flo, ctx);
 	if (fle) {
-		fle->genid = atomic_read(&flow_cache_genid);
+		fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
 		if (!IS_ERR(flo))
 			fle->object = flo;
 		else
@@ -303,12 +285,14 @@ static void flow_cache_flush_tasklet(unsigned long data)
 	struct hlist_node *tmp;
 	LIST_HEAD(gc_list);
 	int i, deleted = 0;
+	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
+						flow_cache_global);
 
 	fcp = this_cpu_ptr(fc->percpu);
 	for (i = 0; i < flow_cache_hash_size(fc); i++) {
 		hlist_for_each_entry_safe(fle, tmp,
 					  &fcp->hash_table[i], u.hlist) {
-			if (flow_entry_valid(fle))
+			if (flow_entry_valid(fle, xfrm))
 				continue;
 
 			deleted++;
@@ -317,7 +301,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
 		}
 	}
 
-	flow_cache_queue_garbage(fcp, deleted, &gc_list);
+	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
 
 	if (atomic_dec_and_test(&info->cpuleft))
 		complete(&info->completion);
@@ -351,10 +335,9 @@ static void flow_cache_flush_per_cpu(void *data)
 	tasklet_schedule(tasklet);
 }
 
-void flow_cache_flush(void)
+void flow_cache_flush(struct net *net)
 {
 	struct flow_flush_info info;
-	static DEFINE_MUTEX(flow_flush_sem);
 	cpumask_var_t mask;
 	int i, self;
 
@@ -365,8 +348,8 @@ void flow_cache_flush(void)
 
 	/* Don't want cpus going down or up during this. */
 	get_online_cpus();
-	mutex_lock(&flow_flush_sem);
-	info.cache = &flow_cache_global;
+	mutex_lock(&net->xfrm.flow_flush_sem);
+	info.cache = &net->xfrm.flow_cache_global;
 	for_each_online_cpu(i)
 		if (!flow_cache_percpu_empty(info.cache, i))
 			cpumask_set_cpu(i, mask);
@@ -386,21 +369,23 @@ void flow_cache_flush(void)
 	wait_for_completion(&info.completion);
 
 done:
-	mutex_unlock(&flow_flush_sem);
+	mutex_unlock(&net->xfrm.flow_flush_sem);
 	put_online_cpus();
 	free_cpumask_var(mask);
 }
 
 static void flow_cache_flush_task(struct work_struct *work)
 {
-	flow_cache_flush();
-}
+	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+						flow_cache_gc_work);
+	struct net *net = container_of(xfrm, struct net, xfrm);
 
-static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+	flow_cache_flush(net);
+}
 
-void flow_cache_flush_deferred(void)
+void flow_cache_flush_deferred(struct net *net)
 {
-	schedule_work(&flow_cache_flush_work);
+	schedule_work(&net->xfrm.flow_cache_flush_work);
 }
 
 static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
@@ -425,7 +410,8 @@ static int flow_cache_cpu(struct notifier_block *nfb,
 				 unsigned long action,
 				 void *hcpu)
 {
-	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+	struct flow_cache *fc = container_of(nfb, struct flow_cache,
+					hotcpu_notifier);
 	int res, cpu = (unsigned long) hcpu;
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
 
@@ -444,9 +430,20 @@ static int flow_cache_cpu(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static int __init flow_cache_init(struct flow_cache *fc)
+int flow_cache_init(struct net *net)
 {
 	int i;
+	struct flow_cache *fc = &net->xfrm.flow_cache_global;
+
+	/* Initialize per-net flow cache global variables here */
+	net->xfrm.flow_cachep = kmem_cache_create("flow_cache",
+					sizeof(struct flow_cache_entry),
+					0, SLAB_PANIC, NULL);
+	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
+	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
+	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
+	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
+	mutex_init(&net->xfrm.flow_flush_sem);
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
@@ -484,14 +481,4 @@ err:
 
 	return -ENOMEM;
 }
-
-static int __init flow_cache_init_global(void)
-{
-	flow_cachep = kmem_cache_create("flow_cache",
-					sizeof(struct flow_cache_entry),
-					0, SLAB_PANIC, NULL);
-
-	return flow_cache_init(&flow_cache_global);
-}
-
-module_init(flow_cache_init_global);
+EXPORT_SYMBOL(flow_cache_init);
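
With the module_init hook gone, callers now pass the namespace explicitly: flow_cache_init() is exported and expected to be called from the xfrm per-net init path, and flow_cache_flush()/flow_cache_flush_deferred() take a struct net. A hedged sketch of the caller side; the real hookup lives under net/xfrm/ and is not part of this diffstat, so the function names below are illustrative:

	/* Illustrative only: names are not taken from this diff. */
	static int __net_init example_xfrm_net_init(struct net *net)
	{
		int rv;

		rv = flow_cache_init(net);	/* sets up net->xfrm.flow_cache_global */
		if (rv)
			return rv;

		/* ... remaining xfrm per-net setup ... */
		return 0;
	}

	static void example_flush_flow_cache(struct net *net)
	{
		flow_cache_flush(net);		/* synchronous; takes net->xfrm.flow_flush_sem */
		flow_cache_flush_deferred(net);	/* queues net->xfrm.flow_cache_flush_work */
	}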