author		Tejun Heo <tj@kernel.org>	2012-01-13 13:14:12 -0500
committer	Ingo Molnar <mingo@elte.hu>	2012-01-16 02:38:06 -0500
commit		5d53cb27d849c899136c048ec84c940ac449494b (patch)
tree		66c78c014a06641245fa3cd219afabdc7afdf427 /mm
parent		edf7c8148ec40c0fd27c0ef3f688defcc65e3913 (diff)
memblock: Fix alloc failure due to dumb underflow protection in memblock_find_in_range_node()
7bd0b0f0da ("memblock: Reimplement memblock allocation using reverse free area iterator") implemented a simple top-down allocator using a reverse memblock iterator. To avoid underflow in the allocator loop, it simply raised the lower boundary to the requested size under the assumption that requested size would be far smaller than available memblocks. This causes early page table allocation failure under certain configurations in Xen. Fix it by checking for underflow directly instead of bumping up lower bound. Signed-off-by: Tejun Heo <tj@kernel.org> Reported-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: rjw@sisk.pl Cc: xen-devel@lists.xensource.com Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20120113181412.GA11112@google.com Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memblock.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 2f55f19b7c8..77b5f227e1d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -106,14 +106,17 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
 
-	/* adjust @start to avoid underflow and allocating the first page */
-	start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+	/* avoid allocating the first page */
+	start = max_t(phys_addr_t, start, PAGE_SIZE);
 	end = max(start, end);
 
 	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 
+		if (this_end < size)
+			continue;
+
 		cand = round_down(this_end - size, align);
 		if (cand >= this_start)
 			return cand;