| author | Tejun Heo <tj@kernel.org> | 2009-06-21 22:56:24 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2009-06-21 22:56:24 -0400 |
| commit | fa8a7094ba1679b4b9b443e0ac9f5e046c79ee8d | |
| tree | cda9df47b1a84581685d8f4e0cd8ce66cac1d234 /mm/percpu.c | |
| parent | e59a1bb2fdfb745c685f5b40ffbed126331d3223 | |
x86: implement percpu_alloc kernel parameter
According to Andi, it isn't clear whether the lpage allocator is worth the
trouble, as there are many processors where PMD TLB entries are far scarcer
than PTE TLB entries. The advantage or disadvantage probably depends on the
actual size of the percpu area and on the specific processor. As performance
degradation due to TLB pressure tends to be highly workload-specific and
subtle, it is difficult to decide which way to go without more data.
This patch implements the percpu_alloc kernel parameter to allow selecting
which first chunk allocator to use, easing debugging and testing.
While at it, make sure all the failure paths report why something failed,
to help determine why a given allocator isn't working. Also, kill the
"Great future plan" comment, which had already been realized quite some
time ago.
[ Impact: allow explicit percpu first chunk allocator selection ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Jan Beulich <JBeulich@novell.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
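
The x86 side of the change, where the percpu_alloc= boot parameter is actually parsed, lies outside the diffstat shown below. As a rough, hypothetical sketch only, a boot parameter like this is typically wired up with early_param(); the handler name, the selector flag, and the accepted value in this sketch are assumptions for illustration, not code from this commit.

```c
/*
 * Hypothetical sketch only -- the real x86 handler is not part of the
 * mm/percpu.c diff shown on this page.  Flag and value names are assumed.
 */
#include <linux/init.h>
#include <linux/string.h>

static int pcpu_chosen_alloc;		/* assumed selector flag */
#define PCPU_ALLOC_AUTO		0
#define PCPU_ALLOC_EMBED	1

static int __init percpu_alloc_setup(char *str)
{
	if (str && !strcmp(str, "embed"))	/* e.g. booting with percpu_alloc=embed */
		pcpu_chosen_alloc = PCPU_ALLOC_EMBED;
	/* other allocator names would be matched here */
	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
```

With a handler along these lines, and assuming the parameter accepts a value such as "embed" (the exact strings live in the x86 code, which is not shown here), booting with percpu_alloc=embed would select the embedding first chunk allocator instead of the default choice.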
Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- | mm/percpu.c | 13 |
1 file changed, 9 insertions, 4 deletions
```diff
diff --git a/mm/percpu.c b/mm/percpu.c
index d06f4748271e..b70f2acd8853 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1233,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 				      ssize_t dyn_size, ssize_t unit_size)
 {
+	size_t chunk_size;
 	unsigned int cpu;
 
 	/* determine parameters and allocate */
@@ -1247,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	} else
 		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
 
-	pcpue_ptr = __alloc_bootmem_nopanic(
-					num_possible_cpus() * pcpue_unit_size,
-					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!pcpue_ptr)
+	chunk_size = pcpue_unit_size * num_possible_cpus();
+
+	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+					    __pa(MAX_DMA_ADDRESS));
+	if (!pcpue_ptr) {
+		pr_warning("PERCPU: failed to allocate %zu bytes for "
+			   "embedding\n", chunk_size);
 		return -ENOMEM;
+	}
 
 	/* return the leftover and copy */
 	for_each_possible_cpu(cpu) {
```
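
The hunk above amounts to a small, reusable pattern: compute the chunk size once, and when the boot-time allocation fails, report how many bytes were requested rather than failing silently (the "failure paths report why something failed" part of the commit message). Below is a minimal userspace mirror of that pattern; the function and variable names are illustrative and not taken from the kernel.

```c
/*
 * Userspace sketch of the error-reporting pattern the hunk adds:
 * compute the size once, then reuse it in the failure message.
 * Names (embed_alloc_first_chunk, unit_size, nr_cpus) are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

static void *embed_alloc_first_chunk(size_t unit_size, unsigned int nr_cpus)
{
	size_t chunk_size = unit_size * nr_cpus;	/* one unit per possible CPU */
	void *ptr = malloc(chunk_size);			/* stands in for __alloc_bootmem_nopanic() */

	if (!ptr) {
		/* mirrors the new pr_warning(): report what was requested */
		fprintf(stderr, "PERCPU: failed to allocate %zu bytes for embedding\n",
			chunk_size);
		return NULL;
	}
	return ptr;
}

int main(void)
{
	void *chunk = embed_alloc_first_chunk(64 << 10, 4);	/* e.g. 64 KiB x 4 CPUs */

	free(chunk);
	return 0;
}
```

Keeping the size in a named variable also lets the allocation call and the failure message agree by construction, which is exactly what introducing chunk_size buys in the real hunk.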