| author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-09-10 03:00:25 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-09-13 23:02:50 -0400 |
| commit | 83b6b1f5d13414d0cb5c4f0a567a6aec0af073bd (patch) | |
| tree | aa3db3ccfefbb232c0acc01fac52873d7ed6d048 /net/core/flow.c | |
| parent | efbc2d7cfa67a9753cd45eb7eee8c9ad10c8b74c (diff) | |
flow: better memory management
Allocate hash tables for every online cpu, not every possible one.
NUMA-aware allocations.
Don't use a full page on arches where PAGE_SIZE > 1024*sizeof(void *).
misc:
__percpu, __read_mostly, __cpuinit annotations
flow_compare_t is just an "unsigned long"
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/flow.c')
| -rw-r--r-- | net/core/flow.c | 78 |
1 file changed, 42 insertions(+), 36 deletions(-)
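
To make the commit message's sizing point concrete, here is a rough back-of-the-envelope sketch (not part of the patch). It assumes the hash_shift of 10 set in flow_cache_init() below and a 64-bit build where sizeof(struct hlist_head) is 8 bytes, and it uses a 64 KB page purely as an example of an arch where PAGE_SIZE exceeds 1024*sizeof(void *):

```c
#include <stdio.h>

int main(void)
{
	unsigned long buckets = 1UL << 10;   /* flow_cache_hash_size() with hash_shift = 10 */
	unsigned long needed  = buckets * 8; /* bytes of hlist_head per cpu on a 64-bit build */
	unsigned long page    = 64 * 1024;   /* example: a 64 KB-page architecture */

	/* The old code used __get_free_pages(), so each per-cpu table consumed
	 * at least one full page; kzalloc_node() allocates just what is needed. */
	printf("needed per cpu: %lu bytes, unused with a full-page allocation: %lu bytes\n",
	       needed, page - needed);
	return 0;
}
```

With those assumptions each per-cpu table needs 8 KB, so a full 64 KB page leaves 56 KB unused per CPU; the patch instead allocates exactly the required size, on the CPU's home node, and only for CPUs that are actually online.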
```diff
diff --git a/net/core/flow.c b/net/core/flow.c
index f67dcbfe54e..b143b86b1f2 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -53,8 +53,7 @@ struct flow_flush_info {
 
 struct flow_cache {
 	u32				hash_shift;
-	unsigned long			order;
-	struct flow_cache_percpu	*percpu;
+	struct flow_cache_percpu __percpu *percpu;
 	struct notifier_block		hotcpu_notifier;
 	int				low_watermark;
 	int				high_watermark;
@@ -64,7 +63,7 @@ struct flow_cache {
 atomic_t flow_cache_genid = ATOMIC_INIT(0);
 EXPORT_SYMBOL(flow_cache_genid);
 static struct flow_cache flow_cache_global;
-static struct kmem_cache *flow_cachep;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(flow_cache_gc_lock);
 static LIST_HEAD(flow_cache_gc_list);
@@ -181,11 +180,7 @@ static u32 flow_hash_code(struct flow_cache *fc,
 		& (flow_cache_hash_size(fc) - 1));
 }
 
-#if (BITS_PER_LONG == 64)
-typedef u64 flow_compare_t;
-#else
-typedef u32 flow_compare_t;
-#endif
+typedef unsigned long flow_compare_t;
 
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
  * important assumptions that we can here, such as alignment and
@@ -357,62 +352,73 @@ void flow_cache_flush(void)
 	put_online_cpus();
 }
 
-static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
-					  struct flow_cache_percpu *fcp)
+static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
-	fcp->hash_table = (struct hlist_head *)
-		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
-	if (!fcp->hash_table)
-		panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
 
-	fcp->hash_rnd_recalc = 1;
-	fcp->hash_count = 0;
-	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+	if (!fcp->hash_table) {
+		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+		if (!fcp->hash_table) {
+			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
+			return -ENOMEM;
+		}
+		fcp->hash_rnd_recalc = 1;
+		fcp->hash_count = 0;
+		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+	}
+	return 0;
 }
 
-static int flow_cache_cpu(struct notifier_block *nfb,
+static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
 {
 	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
-	int cpu = (unsigned long) hcpu;
+	int res, cpu = (unsigned long) hcpu;
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
 
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		res = flow_cache_cpu_prepare(fc, cpu);
+		if (res)
+			return notifier_from_errno(res);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		__flow_cache_shrink(fc, fcp, 0);
+		break;
+	}
 	return NOTIFY_OK;
 }
 
-static int flow_cache_init(struct flow_cache *fc)
+static int __init flow_cache_init(struct flow_cache *fc)
 {
-	unsigned long order;
 	int i;
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
 	fc->high_watermark = 4 * flow_cache_hash_size(fc);
 
-	for (order = 0;
-	     (PAGE_SIZE << order) <
-		     (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
-	     order++)
-		/* NOTHING */;
-	fc->order = order;
 	fc->percpu = alloc_percpu(struct flow_cache_percpu);
+	if (!fc->percpu)
+		return -ENOMEM;
 
-	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
-		    (unsigned long) fc);
-	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
-	add_timer(&fc->rnd_timer);
-
-	for_each_possible_cpu(i)
-		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
-
+	for_each_online_cpu(i) {
+		if (flow_cache_cpu_prepare(fc, i))
+			return -ENOMEM;
+	}
 	fc->hotcpu_notifier = (struct notifier_block){
 		.notifier_call = flow_cache_cpu,
 	};
 	register_hotcpu_notifier(&fc->hotcpu_notifier);
 
+	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+		    (unsigned long) fc);
+	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+	add_timer(&fc->rnd_timer);
+
 	return 0;
 }
 
```
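
For reference, a minimal sketch of the allocation pattern the patch moves to, taken in isolation: per-cpu bookkeeping from alloc_percpu(), a node-local hash table from kzalloc_node() for each online CPU, and hot-added CPUs left to the CPU_UP_PREPARE notifier. The demo_* names are hypothetical, error unwinding is omitted, and this is not the patch itself:

```c
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct demo_percpu {
	struct hlist_head *hash_table;
};

static struct demo_percpu __percpu *demo_pcpu;

static int demo_prepare_cpu(int cpu)
{
	struct demo_percpu *p = per_cpu_ptr(demo_pcpu, cpu);
	size_t sz = sizeof(struct hlist_head) * (1 << 10);	/* hash_shift = 10 */

	/* NUMA aware: place the table on the memory node that owns @cpu */
	p->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
	return p->hash_table ? 0 : -ENOMEM;
}

static int __init demo_init(void)
{
	int cpu;

	demo_pcpu = alloc_percpu(struct demo_percpu);
	if (!demo_pcpu)
		return -ENOMEM;

	/* Online CPUs only; CPUs brought up later would get their table
	 * from a CPU_UP_PREPARE notifier, as in the patch above. */
	for_each_online_cpu(cpu) {
		if (demo_prepare_cpu(cpu))
			return -ENOMEM;
	}
	return 0;
}
```

The design choice mirrors the commit message: memory is only committed for CPUs that exist and are online, each table lives on its CPU's local node, and the allocation is sized exactly rather than rounded up to a page order.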