author	Tejun Heo <tj@kernel.org>	2009-06-21 22:56:24 -0400
committer	Tejun Heo <tj@kernel.org>	2009-06-21 22:56:24 -0400
commit	0017c869ddcb73069905d09f9e98e68627466237 (patch)
tree	380248938561c53ff174ffba72f22cbc3e8a5438 /arch/x86
parent	fa8a7094ba1679b4b9b443e0ac9f5e046c79ee8d (diff)
x86: ensure percpu lpage doesn't consume too much vmalloc space
On extreme configurations (e.g. a 32-bit 32-way NUMA machine), the lpage
percpu first chunk allocator can consume too much vmalloc space.  Make it
fall back to the 4k allocator if the consumption goes over 20%.

[ Impact: add sanity check for lpage percpu first chunk allocator ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Jan Beulich <JBeulich@novell.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
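For scale, here is a minimal standalone sketch of the arithmetic behind the
20% limit (not kernel code; the 32 CPUs, 2MB PMD size and 128MB vmalloc area
are assumed example values for the kind of 32-bit NUMA box described above):

/*
 * Userspace sketch of the sanity check: how much vmalloc space would the
 * lpage first chunk need, and does it exceed one fifth of the area?
 */
#include <stdio.h>
#include <stddef.h>

#define PMD_SIZE_SKETCH		(2UL * 1024 * 1024)	/* assumed 2MB large page */
#define VMALLOC_SIZE_SKETCH	(128UL * 1024 * 1024)	/* assumed 128MB vmalloc area */

int main(void)
{
	unsigned long nr_cpus = 32;			/* 32-way NUMA box */
	size_t vm_size = VMALLOC_SIZE_SKETCH;
	size_t tot_size = nr_cpus * PMD_SIZE_SKETCH;	/* 64MB of percpu lpage */

	printf("lpage first chunk would use %zuMB of a %zuMB vmalloc area\n",
	       tot_size >> 20, vm_size >> 20);

	/* same threshold as the patch: refuse more than 20% of vmalloc space */
	if (tot_size > vm_size / 5)
		printf("over 20%% -> fall back to the 4k allocator\n");
	else
		printf("within budget -> lpage allocator is fine\n");

	return 0;
}

With these assumed numbers, 64MB is half of the 128MB vmalloc area, so the
check trips and the 4k allocator is used instead.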
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/setup_percpu.c	|	18
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 165ebd5ba83b..29a3eef7cf4a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -163,9 +163,21 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 	int i, j;
 	ssize_t ret;
 
-	/* on non-NUMA, embedding is better */
-	if (!chosen && !pcpu_need_numa())
-		return -EINVAL;
+	if (!chosen) {
+		size_t vm_size = VMALLOC_END - VMALLOC_START;
+		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+
+		/* on non-NUMA, embedding is better */
+		if (!pcpu_need_numa())
+			return -EINVAL;
+
+		/* don't consume more than 20% of vmalloc area */
+		if (tot_size > vm_size / 5) {
+			pr_info("PERCPU: too large chunk size %zuMB for "
+				"large page remap\n", tot_size >> 20);
+			return -EINVAL;
+		}
+	}
 
 	/* need PSE */
 	if (!cpu_has_pse) {