author    Yinghai Lu <yinghai@kernel.org>    2010-10-05 19:15:15 -0400
committer H. Peter Anvin <hpa@zytor.com>    2010-10-06 00:44:35 -0400
commit    1d931264af0f10649b35afa8fbd2e169da51ac08 (patch)
tree      01ccff1fd0777cf256aeef478357bda6fc178276 /arch/x86/mm/memblock.c
parent    9f4c13964b58608fbce05540743281ea3146c0e8 (diff)
x86-32, memblock: Make add_highpages honor early reserved ranges
Originally the only early reserved range that overlapped with high pages was "KVA RAM", and we already remove that from the active ranges. However, it turns out Xen can have that kind of overlap to support memory ballooning.

So we need to make add_highpage_with_active_regions() subtract memblock reserved ranges just like low RAM; this is the proper design anyway.

This patch refactors get_free_all_memory_range() so it can be used by add_highpage_with_active_regions(). It also means we no longer need to remove "KVA RAM" from the active ranges.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CABB183.1040607@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
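For context, the companion change in arch/x86/mm/init_32.c (part of the same commit, but outside this diffstat) lets the highpages path consume the refactored helper. The sketch below is illustrative rather than the exact hunk; it assumes the era's add_one_highpage_init() helper and shows the intended flow: ask __get_free_all_memory_range() for the free ranges in [start_pfn, end_pfn) with memblock-reserved areas already subtracted, then register each valid page.

void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	struct range *range;
	int nr_range;
	int i;

	/* Free ranges only: early reserved ranges are already subtracted. */
	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);

	for (i = 0; i < nr_range; i++) {
		unsigned long node_pfn;

		for (node_pfn = range[i].start; node_pfn < range[i].end;
		     node_pfn++) {
			if (!pfn_valid(node_pfn))
				continue;
			/* hand the free high page to the buddy allocator */
			add_one_highpage_init(pfn_to_page(node_pfn));
		}
	}
}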
Diffstat (limited to 'arch/x86/mm/memblock.c')
-rw-r--r--  arch/x86/mm/memblock.c  19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index 50ecbc59757f..fd7a0404945d 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -156,7 +156,8 @@ static int __init count_early_node_map(int nodeid)
 	return data.nr;
 }
 
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
+			 unsigned long start_pfn, unsigned long end_pfn)
 {
 	int count;
 	struct range *range;
@@ -172,9 +173,9 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	 * at first
 	 */
 	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-#ifdef CONFIG_X86_32
-	subtract_range(range, count, max_low_pfn, -1ULL);
-#endif
+	subtract_range(range, count, 0, start_pfn);
+	subtract_range(range, count, end_pfn, -1ULL);
+
 	memblock_x86_subtract_reserved(range, count);
 	nr_range = clean_sort_range(range, count);
 
@@ -182,6 +183,16 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	return nr_range;
 }
 
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+	unsigned long end_pfn = -1UL;
+
+#ifdef CONFIG_X86_32
+	end_pfn = max_low_pfn;
+#endif
+	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
+}
+
 static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
 {
 	int i, count;
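The pair of subtract_range() calls in the second hunk clamps the assembled range list to [start_pfn, end_pfn). Below is a minimal user-space model of that clipping; the struct and helper mirror the kernel's struct range and subtract_range() from kernel/range.c in shape only (the real kernel version also splits a range into two when the cut falls strictly inside it, which this model merely notes in a comment).

#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long end;
};

/* Model of subtract_range(): drop [start, end) from every entry. */
static void subtract_range(struct range *r, int count,
			   unsigned long long start, unsigned long long end)
{
	int i;

	for (i = 0; i < count; i++) {
		if (r[i].start >= end || r[i].end <= start)
			continue;			/* no overlap */
		if (r[i].start >= start && r[i].end <= end)
			r[i].start = r[i].end = 0;	/* fully covered */
		else if (r[i].start < start && r[i].end > end)
			r[i].end = start;	/* kernel would split in two */
		else if (r[i].start < start)
			r[i].end = start;		/* trim the tail */
		else
			r[i].start = end;		/* trim the head */
	}
}

int main(void)
{
	struct range r[] = { { 0x0, 0x200 }, { 0x300, 0x500 } };

	subtract_range(r, 2, 0, 0x100);		/* cut below start_pfn */
	subtract_range(r, 2, 0x400, -1ULL);	/* cut at/above end_pfn */

	/* prints [100,200) [300,400): both clamped to [0x100, 0x400) */
	printf("[%llx,%llx) [%llx,%llx)\n",
	       r[0].start, r[0].end, r[1].start, r[1].end);
	return 0;
}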