Diffstat (limited to 'mm/bootmem.c')
-rw-r--r--  mm/bootmem.c  23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 260e703850d8..c1330cc19783 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -33,6 +33,14 @@ EXPORT_SYMBOL(max_pfn);	/* This is exported so
  * dma_get_required_mask(), which uses
  * it, can be an inline function */
 
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * If we have booted due to a crash, max_pfn will be a very low value. We need
+ * to know the amount of memory that the previous kernel used.
+ */
+unsigned long saved_max_pfn;
+#endif
+
 /* return the number of _pages_ that will be allocated for the boot bitmap */
 unsigned long __init bootmem_bootmap_pages (unsigned long pages)
 {
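The new saved_max_pfn symbol is only meaningful with CONFIG_CRASH_DUMP. A minimal sketch of the intended usage, assuming hypothetical architecture setup code and a hypothetical limit_pfn derived from the mem= restriction passed to the capture kernel (neither appears in this patch):

	#ifdef CONFIG_CRASH_DUMP
		/* Remember how much RAM the crashed kernel had before max_pfn is
		 * clamped to the small region reserved for the capture kernel. */
		saved_max_pfn = max_pfn;
	#endif
		max_pfn = min(max_pfn, limit_pfn);	/* hypothetical mem= clamp */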
@@ -57,7 +65,7 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
 	pgdat->pgdat_next = pgdat_list;
 	pgdat_list = pgdat;
 
-	mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
+	mapsize = ALIGN(mapsize, sizeof(long));
 	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
 	bdata->node_boot_start = (start << PAGE_SHIFT);
 	bdata->node_low_pfn = end;
@@ -178,7 +186,7 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
 	} else
 		preferred = 0;
 
-	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
+	preferred = ALIGN(preferred, align) >> PAGE_SHIFT;
 	preferred += offset;
 	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
 	incr = align >> PAGE_SHIFT ? : 1;
@@ -219,7 +227,7 @@ found:
 	 */
 	if (align < PAGE_SIZE &&
 	    bdata->last_offset && bdata->last_pos+1 == start) {
-		offset = (bdata->last_offset+align-1) & ~(align-1);
+		offset = ALIGN(bdata->last_offset, align);
 		BUG_ON(offset > PAGE_SIZE);
 		remaining_size = PAGE_SIZE-offset;
 		if (size < remaining_size) {
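The three hunks above replace open-coded round-up arithmetic with the ALIGN() helper from include/linux/kernel.h. For a power-of-two alignment it computes the same value as the expressions being removed; roughly (sketch of the classic definition, not copied from this tree):

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	/* e.g. ALIGN(13, 8) == 16, ALIGN(16, 8) == 16 */

so the conversion is a readability cleanup with no behavioural change, since align and sizeof(long) are powers of two here.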
@@ -256,6 +264,7 @@ found:
 static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 {
 	struct page *page;
+	unsigned long pfn;
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long i, count, total = 0;
 	unsigned long idx;
@@ -266,7 +275,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 
 	count = 0;
 	/* first extant page of the node */
-	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
+	pfn = bdata->node_boot_start >> PAGE_SHIFT;
 	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
 	map = bdata->node_bootmem_map;
 	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
@@ -275,9 +284,11 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 	gofast = 1;
 	for (i = 0; i < idx; ) {
 		unsigned long v = ~map[i / BITS_PER_LONG];
+
 		if (gofast && v == ~0UL) {
 			int j, order;
 
+			page = pfn_to_page(pfn);
 			count += BITS_PER_LONG;
 			__ClearPageReserved(page);
 			order = ffs(BITS_PER_LONG) - 1;
@@ -292,6 +303,8 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 			page += BITS_PER_LONG;
 		} else if (v) {
 			unsigned long m;
+
+			page = pfn_to_page(pfn);
 			for (m = 1; m && i < idx; m<<=1, page++, i++) {
 				if (v & m) {
 					count++;
@@ -302,8 +315,8 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 			}
 		} else {
 			i+=BITS_PER_LONG;
-			page += BITS_PER_LONG;
 		}
+		pfn += BITS_PER_LONG;
 	}
 	total += count;
 
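The free_all_bootmem_core() rework above tracks the current position as a page frame number and only converts it with pfn_to_page() once a bitmap word is known to contain pages to free. The old code advanced the struct page pointer even across fully reserved words, which assumes mem_map is one contiguous array covering the whole node. A simplified sketch of the resulting pattern (illustrative only, not the exact loop; start_pfn stands for bdata->node_boot_start >> PAGE_SHIFT):

	for (i = 0, pfn = start_pfn; i < idx; i += BITS_PER_LONG, pfn += BITS_PER_LONG) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (v) {
			/* Only look up struct page when this word has pages to free,
			 * so no page pointer is formed for a hole in the memory map. */
			struct page *page = pfn_to_page(pfn);
			/* ... clear PageReserved and free the pages for set bits in v,
			 *     stopping at idx for a partial final word ... */
		}
	}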