author    Tejun Heo <tj@kernel.org>  2009-03-01 02:06:56 -0500
committer Tejun Heo <tj@kernel.org>  2009-03-01 02:06:56 -0500
commit    d0c4f570276cb4d2dc4215b90eb7cb6e2bdd4a15 (patch)
tree      5cf4f85082ed720df7d815299782e25e44d75c9d /mm/bootmem.c
parent    af6326d72c95d6e0bbc88c92185c654f57acef3b (diff)
bootmem, x86: further fixes for arch-specific bootmem wrapping
Impact: fix new breakages introduced by the previous fix

Commit c132937556f56ee4b831ef4b23f1846e05fde102 tried to clean up the bootmem arch wrapper but it wasn't quite correct. Before the commit, the following were broken:

* Low-level interface functions prefixed with __ ignored arch preference.

* reserve_bootmem(...) can't be mapped onto reserve_bootmem_node(NODE_DATA(0)->bdata, ...) because the node is not a mere preference there: the region specified MUST fall into the node's region, otherwise it will panic.

After the commit:

* If allocation fails on the arch-preferred node, it should fall back to whatever is available. Instead, it simply failed the allocation.

There are too many internal details to allow generic wrapping and still keep things simple for archs. Plus, all an arch wants is a way to prefer a certain node over another.

This patch drops the generic wrapping around alloc_bootmem_core() and adds alloc_arch_preferred_bootmem() instead. If necessary, an arch can define a bootmem_arch_preferred_node() macro or function which takes all the allocation information and returns the preferred node. The generic bootmem code will always try the preferred node first and then fall back to other nodes as usual.

Breakages noted and changes reviewed by Johannes Weiner.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
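To make the new hook concrete, below is a minimal sketch of what an arch-side bootmem_arch_preferred_node() could look like. It is illustrative only and not part of this patch: the helper remap_alloc_usable() and the policy of steering allocations to node 0 are hypothetical stand-ins for whatever condition an arch actually cares about.

/* Hypothetical arch header (e.g. asm/mmzone.h), with
 * CONFIG_HAVE_ARCH_BOOTMEM selected by the arch's Kconfig.
 */
static inline bootmem_data_t *
bootmem_arch_preferred_node(bootmem_data_t *bdata, unsigned long size,
			    unsigned long align, unsigned long goal,
			    unsigned long limit)
{
	/* remap_alloc_usable() is a made-up predicate standing in
	 * for the arch's real test; prefer node 0 while it holds.
	 */
	if (remap_alloc_usable(size, align))
		return NODE_DATA(0)->bdata;

	/* NULL means "no preference": the generic code proceeds
	 * with its usual node walk or the caller-specified node.
	 */
	return NULL;
}

Because alloc_arch_preferred_bootmem() in the diff below treats both a NULL return and a failed allocation on the preferred node as non-fatal, the hook only has to pick a node; it never has to duplicate the fallback logic.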
Diffstat (limited to 'mm/bootmem.c')
-rw-r--r--  mm/bootmem.c | 45
1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d7140c008ba8..daf92713f7de 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -37,16 +37,6 @@ static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
 
 static int bootmem_debug;
 
-/*
- * If an arch needs to apply workarounds to bootmem allocation, it can
- * set CONFIG_HAVE_ARCH_BOOTMEM and define a wrapper around
- * __alloc_bootmem_core().
- */
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM
-#define alloc_bootmem_core(bdata, size, align, goal, limit) \
-	__alloc_bootmem_core((bdata), (size), (align), (goal), (limit))
-#endif
-
 static int __init bootmem_debug_setup(char *buf)
 {
 	bootmem_debug = 1;
@@ -436,9 +426,9 @@ static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
 	return ALIGN(base + off, align) - base;
 }
 
-static void * __init __alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
 				unsigned long size, unsigned long align,
 				unsigned long goal, unsigned long limit)
 {
 	unsigned long fallback = 0;
 	unsigned long min, max, start, sidx, midx, step;
@@ -538,17 +528,34 @@ find_block:
 	return NULL;
 }
 
+static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
+					unsigned long size, unsigned long align,
+					unsigned long goal, unsigned long limit)
+{
+#ifdef CONFIG_HAVE_ARCH_BOOTMEM
+	bootmem_data_t *p_bdata;
+
+	p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
+	if (p_bdata)
+		return alloc_bootmem_core(p_bdata, size, align, goal, limit);
+#endif
+	return NULL;
+}
+
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,
 					unsigned long align,
 					unsigned long goal,
 					unsigned long limit)
 {
 	bootmem_data_t *bdata;
+	void *region;
 
 restart:
-	list_for_each_entry(bdata, &bdata_list, list) {
-		void *region;
+	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
+	if (region)
+		return region;
 
+	list_for_each_entry(bdata, &bdata_list, list) {
 		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
 			continue;
 		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
@@ -626,6 +633,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 {
 	void *ptr;
 
+	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+	if (ptr)
+		return ptr;
+
 	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
 	if (ptr)
 		return ptr;
@@ -682,6 +693,10 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 {
 	void *ptr;
 
+	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
+	if (ptr)
+		return ptr;
+
 	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
 	if (ptr)
 		return ptr;
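For readability, here is a condensed restatement of the allocation order that both patched call sites above now follow. The function name is invented for this sketch, and the later retry-without-goal and panic paths of the real ___alloc_bootmem_node() are elided:

static void * __init bootmem_alloc_order_sketch(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	/* First shot: the arch-preferred node, if the arch names one. */
	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* Fallback: the node the caller asked for, exactly as before
	 * the arch hook existed.
	 */
	return alloc_bootmem_core(bdata, size, align, goal, limit);
}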