author     Eric Dumazet <dada1@cosmosbay.com>  2008-03-04 17:28:35 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-03-04 19:35:11 -0500
commit     be852795e1c8d3829ddf3cb1ce806113611fa555 (patch)
tree       3671dbd5ba64f11650f93fdd9178666bcbb84f7e /mm
parent     e3892296de632e3f9299d9fabe0c746740004891 (diff)
alloc_percpu() fails to allocate percpu data
Some oprofile results obtained while running tbench on a 2x2 CPU machine
were very surprising.
For example, the loopback_xmit() function was consuming a high number of
CPU cycles to perform its statistics updates, which are supposed to be
really cheap since they use percpu data:
pcpu_lstats = netdev_priv(dev);
lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
lb_stats->packets++; /* HERE : serious contention */
lb_stats->bytes += skb->len;
struct pcpu_lstats is a small structure containing two longs. It appears
that on my 32-bit platform, alloc_percpu(8) packs the per-cpu objects into
a single cache line, instead of giving each CPU a separate cache line.
Using the following patch gave me an impressive boost in various benchmarks
(6% in tbench).
(all percpu_counters hit this bug too)
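The contention described above is easy to reproduce from userspace. Below is
a minimal sketch, not the kernel code: the two-thread setup, the iteration
count, and the hard-coded 64-byte padding are illustrative assumptions. It
times two threads bumping counters that either share one cache line (as the
unpadded per-cpu objects did) or each own a full line (as after the roundup);
on most SMP machines the padded run is noticeably faster. Build with
something like gcc -O2 demo.c -pthread.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define ITERS 100000000UL	/* assumed iteration count */

/* Two counters packed side by side: they share one cache line, like
 * the unpadded objects returned by the old alloc_percpu(8). */
static volatile long packed[2];

/* Two counters padded to an assumed 64-byte line each, like the
 * objects after roundup(size, cache_line_size()). */
static struct {
	volatile long ctr;
	char pad[64 - sizeof(long)];
} padded[2];

static void *bump_packed(void *arg)
{
	long i = (long)arg;

	for (unsigned long n = 0; n < ITERS; n++)
		packed[i]++;
	return NULL;
}

static void *bump_padded(void *arg)
{
	long i = (long)arg;

	for (unsigned long n = 0; n < ITERS; n++)
		padded[i].ctr++;
	return NULL;
}

/* Run one thread per counter and return the elapsed wall time. */
static double run(void *(*fn)(void *))
{
	pthread_t t[2];
	struct timespec a, b;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, fn, (void *)i);
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	clock_gettime(CLOCK_MONOTONIC, &b);
	return (b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1e9;
}

int main(void)
{
	printf("shared cache line:   %.2f s\n", run(bump_packed));
	printf("private cache lines: %.2f s\n", run(bump_padded));
	return 0;
}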
The long-term fix (i.e. >= 2.6.26) would be to let each CPU allocate its own
block of memory, so that we don't need to round up sizes to L1_CACHE_BYTES,
or merging the SGI stuff of course...
Note: SLUB vs SLAB is important here to *show* the improvement, since they
don't have the same minimum allocation sizes (8 bytes vs 32 bytes). This
could very well explain regressions some guys reported when they switched
to SLUB.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
 mm/allocpercpu.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 7e58322b7134..b0012e27fea8 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -6,6 +6,10 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 
+#ifndef cache_line_size
+#define cache_line_size()	L1_CACHE_BYTES
+#endif
+
 /**
  * percpu_depopulate - depopulate per-cpu data for given cpu
  * @__pdata: per-cpu data to depopulate
@@ -52,6 +56,11 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	struct percpu_data *pdata = __percpu_disguise(__pdata);
 	int node = cpu_to_node(cpu);
 
+	/*
+	 * We should make sure each CPU gets private memory.
+	 */
+	size = roundup(size, cache_line_size());
+
 	BUG_ON(pdata->ptrs[cpu]);
 	if (node_online(node))
 		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
@@ -98,7 +107,11 @@ EXPORT_SYMBOL_GPL(__percpu_populate_mask);
  */
 void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
 {
-	void *pdata = kzalloc(nr_cpu_ids * sizeof(void *), gfp);
+	/*
+	 * We allocate whole cache lines to avoid false sharing
+	 */
+	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
+	void *pdata = kzalloc(sz, gfp);
 	void *__pdata = __percpu_disguise(pdata);
 
 	if (unlikely(!pdata))
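For reference, a small sketch of the arithmetic the two roundup() calls above
perform. The macro body mirrors the kernel's next-multiple definition; the
32-byte line size and the 4-CPU, 4-byte-pointer figures are assumptions taken
from the 32-bit 2x2 machine described in the log.

#include <stdio.h>

/* Next-multiple rounding, mirroring the kernel's roundup() macro. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long line = 32;	/* assumed cache_line_size() result */

	/* percpu_populate(): each CPU's 8-byte pcpu_lstats block grows
	 * to a full cache line, so no two CPUs share one. */
	printf("per-cpu block: 8 -> %lu bytes\n", roundup(8UL, line));

	/* __percpu_alloc_mask(): the ptrs[] array (4 CPUs x 4-byte
	 * pointers) is padded to a whole line as well. */
	printf("ptrs array: 16 -> %lu bytes\n", roundup(4 * 4UL, line));
	return 0;
}

Note that SLAB's minimum kmalloc object was already 32 bytes, which is why
the false sharing showed up mainly under SLUB and its 8-byte minimum, as the
log observes.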