author     Tejun Heo <tj@kernel.org>  2009-03-06 00:33:59 -0500
committer  Tejun Heo <tj@kernel.org>  2009-03-06 00:33:59 -0500
commit     9a4f8a878b68d5a5d9ee60908a52cf6a55e1b823 (patch)
tree       be7075427a064709bf1a3d3184c7cc3dc147d6e8 /arch/x86/kernel/setup_percpu.c
parent     cafe8816b217b98dc3f268d3b77445da498beb4f (diff)
x86: make embedding percpu allocator return excessive free space
Impact: reduce unnecessary memory usage on certain configurations

The embedding percpu allocator allocates unit_size * num_possible_cpus() bytes consecutively and uses it as the first chunk. However, if the static area is small, this can result in excessive preallocated free space in the first chunk due to the PCPU_MIN_UNIT_SIZE restriction.

This patch makes the embedding percpu allocator preallocate only what's necessary, as described by PERCPU_DYNAMIC_RESERVE, and return the leftover to the bootmem allocator.

Signed-off-by: Tejun Heo <tj@kernel.org>
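To see the effect of the sizing change at a glance, here is a minimal userspace sketch of the arithmetic the patch introduces. PFN_ALIGN() is reimplemented locally, and the reserve and minimum-unit values are illustrative stand-ins, not the kernel's configuration-dependent PERCPU_DYNAMIC_RESERVE and PCPU_MIN_UNIT_SIZE.

/*
 * Minimal sketch of the first-chunk sizing arithmetic; constants are
 * illustrative stand-ins for the kernel's configuration-dependent ones.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE              4096UL
#define PFN_ALIGN(x)           (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PERCPU_DYNAMIC_RESERVE (20UL << 10)	/* illustrative */
#define PCPU_MIN_UNIT_SIZE     (64UL << 10)	/* illustrative */

int main(void)
{
	size_t static_size = 8UL << 10;	/* hypothetical small static area */

	/* what the first chunk actually needs: static + dynamic reserve */
	size_t pcpue_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);

	/* the unit size is still forced up to the allocator's minimum */
	size_t unit_size = pcpue_size > PCPU_MIN_UNIT_SIZE
				 ? pcpue_size : PCPU_MIN_UNIT_SIZE;

	/*
	 * Before this patch the difference stayed allocated but unused;
	 * with it, each cpu's leftover goes back to the bootmem allocator.
	 */
	printf("needed %zu bytes, unit %zu bytes, returned per cpu: %zu\n",
	       pcpue_size, unit_size, unit_size - pcpue_size);
	return 0;
}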
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--  arch/x86/kernel/setup_percpu.c  44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef3a2cd3fe64..38e2b2a470a5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -241,24 +241,31 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * Embedding allocator
  *
  * The first chunk is sized to just contain the static area plus
- * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using
- * bootmem allocator and used as-is without being mapped into vmalloc
- * area. This enables the first chunk to piggy back on the linear
- * physical PMD mapping and doesn't add any additional pressure to
- * TLB.
+ * module and dynamic reserves, and allocated as a contiguous area
+ * using bootmem allocator and used as-is without being mapped into
+ * vmalloc area. This enables the first chunk to piggy back on the
+ * linear physical PMD mapping and doesn't add any additional pressure
+ * to TLB. Note that if the needed size is smaller than the minimum
+ * unit size, the leftover is returned to the bootmem allocator.
  */
 static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
 static size_t pcpue_unit_size __initdata;
 
 static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 {
-	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
-			    + ((size_t)pageno << PAGE_SHIFT));
+	size_t off = (size_t)pageno << PAGE_SHIFT;
+
+	if (off >= pcpue_size)
+		return NULL;
+
+	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
 }
 
 static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
 	unsigned int cpu;
+	size_t dyn_size;
 
 	/*
 	 * If large page isn't supported, there's no benefit in doing
@@ -269,25 +276,30 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 		return -EINVAL;
 
 	/* allocate and copy */
-	pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
-	pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+	pcpue_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+	pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+	dyn_size = pcpue_size - static_size;
+
 	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
 				       PAGE_SIZE);
 	if (!pcpue_ptr)
 		return -ENOMEM;
 
-	for_each_possible_cpu(cpu)
-		memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
-		       static_size);
+	for_each_possible_cpu(cpu) {
+		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+		free_bootmem(__pa(ptr + pcpue_size),
+			     pcpue_unit_size - pcpue_size);
+		memcpy(ptr, __per_cpu_load, static_size);
+	}
 
 	/* we're ready, commit */
 	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-		pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
 
 	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-				      pcpue_unit_size,
-				      pcpue_unit_size - static_size, pcpue_ptr,
-				      NULL);
+				      pcpue_unit_size, dyn_size,
+				      pcpue_ptr, NULL);
 }
 
 /*
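The bounds check added to pcpue_get_page() is the other half of the scheme: pages past pcpue_size have been handed back to bootmem, so the first-chunk setup code must see them as absent. Below is a standalone sketch of that callback contract, with char pointers standing in for struct page and hypothetical sizes; it is an illustration, not kernel code.

/*
 * Sketch of the lookup contract: offsets past pcpue_size were freed
 * back to bootmem, so the lookup returns NULL and such pages are
 * treated as not populated.  Models a single cpu with made-up sizes.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12

static char unit0[16 * 4096];		/* one cpu's unit: 16 pages */
static char *pcpue_ptr = unit0;
static size_t pcpue_size = 7 * 4096;	/* only 7 pages stay populated */
static size_t pcpue_unit_size = sizeof(unit0);

static char *pcpue_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpue_size)
		return NULL;		/* leftover page, already freed */

	return pcpue_ptr + cpu * pcpue_unit_size + off;
}

int main(void)
{
	/* pages 0..6 resolve; 7..15 report NULL, i.e. handed back */
	for (int pageno = 0; pageno < 16; pageno++)
		printf("page %2d -> %s\n", pageno,
		       pcpue_get_page(0, pageno) ? "populated" : "NULL");
	return 0;
}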