author    David Howells <dhowells@redhat.com>	2006-01-06 03:11:08 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 11:33:26 -0500
commit    a226f6c899799fe2c4919daa0767ac579c88f7bd
tree      82863c401f344cae8ab518b174085a7071a0a325
parent    008857c1a49ccffc31a54c3ea7e182833bd61304
[PATCH] FRV: Clean up bootmem allocator's page freeing algorithm
The attached patch cleans up the way the bootmem allocator frees pages.

A new function, __free_pages_bootmem(), is provided in mm/page_alloc.c that is
called from mm/bootmem.c to turn pages over to the main allocator.  All the
bits of code to initialise pages (clearing PG_reserved and setting the page
count) are moved to here.  The checks on page validity are removed, on the
assumption that the struct page arrays will have been prepared correctly.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
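As a rough model of what the patch consolidates, here is a small userspace C
sketch. The toy struct page, the stub free_pages_bulk_stub(), and the name
free_pages_bootmem_model() are all invented for illustration; this shows the
idea of the consolidation, not the kernel implementation (see the real diff
below):

#include <stdio.h>

#define BITS_PER_LONG 64
#define PG_RESERVED   0x1UL

/* Toy page descriptor standing in for the kernel's struct page. */
struct page {
	unsigned long flags;
	int count;
};

/* Stub for the buddy allocator's bulk-free path. */
static void free_pages_bulk_stub(struct page *block, unsigned int order)
{
	printf("handed a block of %u pages to the main allocator\n",
	       1U << order);
}

/*
 * Model of __free_pages_bootmem(): the page initialisation that
 * mm/bootmem.c used to open-code at each call site (clearing the
 * reserved flag, fixing the page count) now happens in one place
 * before the block is turned over to the main allocator.
 */
static void free_pages_bootmem_model(struct page *block, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++) {
		block[i].flags &= ~PG_RESERVED;	/* was __ClearPageReserved() */
		block[i].count = 0;		/* was set_page_count(p, 0) */
	}
	free_pages_bulk_stub(block, order);
}

int main(void)
{
	static struct page pages[BITS_PER_LONG];
	unsigned int i;

	for (i = 0; i < BITS_PER_LONG; i++)
		pages[i].flags = PG_RESERVED;

	/* ffs(BITS_PER_LONG) - 1 == 6 on a 64-bit build: one order-6
	 * block covers the 64 pages tracked by one bitmap word. */
	free_pages_bootmem_model(pages, 6);
	return 0;
}

The kernel function additionally routes order-0 pages through the per-CPU
hot/cold lists and prefetches ahead while walking the block; the model keeps
only the structural point that all page fix-up now happens in one function
before the hand-off.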
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c     20
-rw-r--r--  mm/internal.h     2
-rw-r--r--  mm/page_alloc.c  36
3 files changed, 41 insertions(+), 17 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index cbb82ee14fb5..35c32290f717 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -296,20 +296,12 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 		unsigned long v = ~map[i / BITS_PER_LONG];
 
 		if (gofast && v == ~0UL) {
-			int j, order;
+			int order;
 
 			page = pfn_to_page(pfn);
 			count += BITS_PER_LONG;
-			__ClearPageReserved(page);
 			order = ffs(BITS_PER_LONG) - 1;
-			set_page_refs(page, order);
-			for (j = 1; j < BITS_PER_LONG; j++) {
-				if (j + 16 < BITS_PER_LONG)
-					prefetchw(page + j + 16);
-				__ClearPageReserved(page + j);
-				set_page_count(page + j, 0);
-			}
-			__free_pages(page, order);
+			__free_pages_bootmem(page, order);
 			i += BITS_PER_LONG;
 			page += BITS_PER_LONG;
 		} else if (v) {
@@ -319,9 +311,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 			for (m = 1; m && i < idx; m<<=1, page++, i++) {
 				if (v & m) {
 					count++;
-					__ClearPageReserved(page);
-					set_page_refs(page, 0);
-					__free_page(page);
+					__free_pages_bootmem(page, 0);
 				}
 			}
 		} else {
@@ -339,9 +329,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 	count = 0;
 	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
 		count++;
-		__ClearPageReserved(page);
-		set_page_count(page, 1);
-		__free_page(page);
+		__free_pages_bootmem(page, 0);
 	}
 	total += count;
 	bdata->node_bootmem_map = NULL;
diff --git a/mm/internal.h b/mm/internal.h
index 85004f540e34..17256bb2f4ef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -26,3 +26,5 @@ static inline void set_page_refs(struct page *page, int order)
 #endif /* CONFIG_MMU */
 }
 
+extern void fastcall __init __free_pages_bootmem(struct page *page,
+						unsigned int order);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 925b0b985f79..cdad3249cf7f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,8 @@ unsigned long totalram_pages __read_mostly;
 unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 
+static void fastcall free_hot_cold_page(struct page *page, int cold);
+
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
  * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
@@ -432,6 +434,39 @@ void __free_pages_ok(struct page *page, unsigned int order)
 	local_irq_restore(flags);
 }
 
+/*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+{
+	if (order == 0) {
+		__ClearPageReserved(page);
+		set_page_count(page, 0);
+
+		free_hot_cold_page(page, 0);
+	} else {
+		LIST_HEAD(list);
+		int loop;
+
+		for (loop = 0; loop < BITS_PER_LONG; loop++) {
+			struct page *p = &page[loop];
+
+			if (loop + 16 < BITS_PER_LONG)
+				prefetchw(p + 16);
+			__ClearPageReserved(p);
+			set_page_count(p, 0);
+		}
+
+		arch_free_page(page, order);
+
+		mod_page_state(pgfree, 1 << order);
+
+		list_add(&page->lru, &list);
+		kernel_map_pages(page, 1 << order, 0);
+		free_pages_bulk(page_zone(page), 1, &list, order);
+	}
+}
+
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -671,7 +706,6 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
 /*
  * Free a 0-order page
  */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
 static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);