author     Tang Chen <tangchen@cn.fujitsu.com>             2013-02-22 19:33:51 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 20:50:14 -0500
commit     f7210e6c4ac795694106c1c5307134d3fc233e88 (patch)
tree       1d453017db83fbf575ae88c0015e877a9fe05d9e
parent     01a178a94e8eaec351b29ee49fbb3d1c124cb7fb (diff)
mm/memblock.c: use CONFIG_HAVE_MEMBLOCK_NODE_MAP to protect movablecore_map in memblock_overlaps_region().
The definition of struct movablecore_map is protected by
CONFIG_HAVE_MEMBLOCK_NODE_MAP, but its use in memblock_overlaps_region()
is not. So add CONFIG_HAVE_MEMBLOCK_NODE_MAP to protect the use of
movablecore_map in memblock_overlaps_region().

Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
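The bug class here is the usual Kconfig one: a symbol that is only defined under a config option must also only be used under that option, or builds without the option fail on an undefined symbol. Below is a minimal sketch of the pattern; CONFIG_HAVE_FOO_MAP and foo_map are hypothetical stand-ins, not the actual memblock symbols:

/* Sketch of the Kconfig guard pattern (hypothetical names). Toggle the
 * behaviour by defining or omitting CONFIG_HAVE_FOO_MAP. */
#include <stdio.h>

#define CONFIG_HAVE_FOO_MAP 1

#ifdef CONFIG_HAVE_FOO_MAP
/* The definition exists only when the option is set... */
struct foo_map {
	unsigned long start;
	unsigned long end;
};
static struct foo_map foo_map = { 0x1000, 0x2000 };
#endif

static int foo_overlaps(unsigned long start, unsigned long end)
{
#ifdef CONFIG_HAVE_FOO_MAP
	/* ...so every use must sit behind the same guard, or a build
	 * without the option breaks. */
	return start < foo_map.end && foo_map.start < end;
#else
	return 0;	/* no map configured: nothing overlaps */
#endif
}

int main(void)
{
	printf("%d\n", foo_overlaps(0x1800, 0x3000));	/* prints 1 */
	return 0;
}

The patch applies the same discipline to movablecore_map by compiling memblock_find_in_range_node() in two variants, as the diff below shows.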
-rw-r--r--  include/linux/memblock.h |  1
-rw-r--r--  mm/memblock.c            | 34
2 files changed, 35 insertions, 0 deletions
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index dfefaf111c0e..3e5ecb2d790e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -61,6 +61,7 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 			  unsigned long *out_end_pfn, int *out_nid);
 
diff --git a/mm/memblock.c b/mm/memblock.c
index c83ff97f57f4..1bcd9b970564 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -92,9 +92,13 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
+ * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check that
+ * the memory we found is not in hotpluggable ranges.
+ *
  * RETURNS:
  * Found address on success, %0 on failure.
  */
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 					phys_addr_t end, phys_addr_t size,
 					phys_addr_t align, int nid)
@@ -139,6 +143,36 @@ restart:
 
 	return 0;
 }
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+					phys_addr_t end, phys_addr_t size,
+					phys_addr_t align, int nid)
+{
+	phys_addr_t this_start, this_end, cand;
+	u64 i;
+
+	/* pump up @end */
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+		end = memblock.current_limit;
+
+	/* avoid allocating the first page */
+	start = max_t(phys_addr_t, start, PAGE_SIZE);
+	end = max(start, end);
+
+	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+		this_start = clamp(this_start, start, end);
+		this_end = clamp(this_end, start, end);
+
+		if (this_end < size)
+			continue;
+
+		cand = round_down(this_end - size, align);
+		if (cand >= this_start)
+			return cand;
+	}
+	return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 /**
  * memblock_find_in_range - find free area in given range
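The #else branch above is the plain top-down search used when no node map (and hence no movablecore_map) exists: each free range is clamped to the requested [start, end) window, and the highest aligned address whose [cand, cand + size) still fits is returned. A minimal user-space sketch of that candidate computation, with hand-written stand-ins for the kernel's clamp() and round_down() helpers and made-up example ranges:

/* User-space sketch of the top-down candidate search in the fallback
 * memblock_find_in_range_node() above. All values are made up. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

static phys_addr_t clamp_addr(phys_addr_t v, phys_addr_t lo, phys_addr_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static phys_addr_t round_down_addr(phys_addr_t v, phys_addr_t align)
{
	return v & ~(align - 1);	/* align must be a power of two */
}

int main(void)
{
	/* One hypothetical free range and one allocation request. */
	phys_addr_t this_start = 0x1200, this_end = 0x9000;
	phys_addr_t start = 0x1000, end = 0x8000;
	phys_addr_t size = 0x2000, align = 0x1000;

	this_start = clamp_addr(this_start, start, end);
	this_end = clamp_addr(this_end, start, end);

	if (this_end >= size) {
		/* Highest aligned base whose [cand, cand + size) fits. */
		phys_addr_t cand = round_down_addr(this_end - size, align);
		if (cand >= this_start)
			printf("candidate: 0x%llx\n",
			       (unsigned long long)cand);	/* 0x6000 */
	}
	return 0;
}

Note that the this_end < size test in the kernel loop doubles as an underflow guard: phys_addr_t is unsigned, so computing this_end - size without it could wrap around.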