author     Yinghai Lu <yinghai@kernel.org>    2010-08-25 16:39:16 -0400
committer  H. Peter Anvin <hpa@zytor.com>     2010-08-27 14:10:48 -0400
commit     4d5cf86ce187c0d3a4cdf233ab0cc6526ccbe01f (patch)
tree       cd631e331bd071a0e4c38ada5628b0648f87e22c /arch/x86
parent     9dc5d569c133819c1ce069ebb1d771c62de32580 (diff)
x86, memblock: Add get_free_all_memory_range()
get_free_all_memory_range() is for CONFIG_NO_BOOTMEM=y and will be called by
free_all_memory_core_early(). It takes early_node_map (aka the active ranges),
subtracts memblock.reserved, and returns the resulting free ranges; those
ranges will then be converted to slab pages.

-v4: increase range size

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
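Editor's note: free_all_memory_core_early(), the consumer named above, is not part of this diff. The sketch below shows, from memory of the mm/bootmem.c no-bootmem path of this era, roughly how such a caller walks the returned range array; the __free_pages_memory() helper and the loop details are assumptions for illustration, not taken from this patch.

/* Sketch only -- not part of this patch; helper name is an assumption. */
unsigned long __init free_all_memory_core_early(int nodeid)
{
        int i;
        u64 start, end;
        unsigned long count = 0;
        struct range *range = NULL;
        int nr_range;

        nr_range = get_free_all_memory_range(&range, nodeid);

        for (i = 0; i < nr_range; i++) {
                start = range[i].start;
                end = range[i].end;
                count += end - start;
                /* hand each free PFN range straight to the page allocator */
                __free_pages_memory(start, end);
        }

        return count;
}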
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/memblock.h |  2
-rw-r--r--  arch/x86/mm/memblock.c          | 98
2 files changed, 99 insertions, 1 deletion
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index e11ddf059fa1..72639ce65e8d 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -8,5 +8,7 @@ void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
+struct range;
+int get_free_all_memory_range(struct range **rangep, int nodeid);
 
 #endif
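Editor's note: struct range is only forward-declared in this header; its definition and the generic range helpers that the memblock.c side of this patch leans on live in include/linux/range.h and kernel/range.c. The recap below is written from memory of that era of the tree and is a rough reference, not part of this patch.

/* include/linux/range.h (approximate) */
struct range {
        u64 start;
        u64 end;
};

/*
 * kernel/range.c helpers used by the new code below (signatures from memory):
 * subtract_range() carves [start, end) out of every entry in the array, and
 * clean_sort_range() drops empty entries, sorts, and returns how many remain.
 */
void subtract_range(struct range *range, int az, u64 start, u64 end);
int clean_sort_range(struct range *range, int az);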
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index 9829eaf1dbda..b4500604ab30 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -86,7 +86,103 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
         return MEMBLOCK_ERROR;
 }
 
-#ifndef CONFIG_NO_BOOTMEM
+static __init struct range *find_range_array(int count)
+{
+        u64 end, size, mem;
+        struct range *range;
+
+        size = sizeof(struct range) * count;
+        end = memblock.current_limit;
+
+        mem = memblock_find_in_range(0, end, size, sizeof(struct range));
+        if (mem == MEMBLOCK_ERROR)
+                panic("can not find more space for range array");
+
+        /*
+         * This range is temporary, so don't reserve it; it will not be
+         * overlapped because we will not allocate a new buffer before
+         * we discard this one.
+         */
+        range = __va(mem);
+        memset(range, 0, size);
+
+        return range;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+static void __init memblock_x86_subtract_reserved(struct range *range, int az)
+{
+        u64 final_start, final_end;
+        struct memblock_region *r;
+
+        /* Take out the region array itself first */
+        memblock_free_reserved_regions();
+
+        pr_info("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
+
+        for_each_memblock(reserved, r) {
+                pr_info("  [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
+                final_start = PFN_DOWN(r->base);
+                final_end = PFN_UP(r->base + r->size);
+                if (final_start >= final_end)
+                        continue;
+                subtract_range(range, az, final_start, final_end);
+        }
+
+        /* Put the region array back? */
+        memblock_reserve_reserved_regions();
+}
+
+struct count_data {
+        int nr;
+};
+
+static int __init count_work_fn(unsigned long start_pfn,
+                                unsigned long end_pfn, void *datax)
+{
+        struct count_data *data = datax;
+
+        data->nr++;
+
+        return 0;
+}
+
+static int __init count_early_node_map(int nodeid)
+{
+        struct count_data data;
+
+        data.nr = 0;
+        work_with_active_regions(nodeid, count_work_fn, &data);
+
+        return data.nr;
+}
+
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+        int count;
+        struct range *range;
+        int nr_range;
+
+        count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
+
+        range = find_range_array(count);
+        nr_range = 0;
+
+        /*
+         * Use early_node_map[] and memblock.reserved.region to build the
+         * range array first
+         */
+        nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
+#ifdef CONFIG_X86_32
+        subtract_range(range, count, max_low_pfn, -1ULL);
+#endif
+        memblock_x86_subtract_reserved(range, count);
+        nr_range = clean_sort_range(range, count);
+
+        *rangep = range;
+        return nr_range;
+}
+#else
 void __init memblock_x86_to_bootmem(u64 start, u64 end)
 {
         int count;