author		Nick Piggin <npiggin@suse.de>	2006-03-22 03:08:07 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:58 -0500
commit		545b1ea9bfa5a8ca9af33d63144bd4f2faaea8dd (patch)
tree		deef747e0f08089a0cd14e09551efaddfad813f9
parent		9d41415221214ca4820b9464dfa548e2f20e7dd5 (diff)
[PATCH] mm: cleanup bootmem
The bootmem code added to page_alloc.c duplicated some page freeing code
that it really doesn't need to because it is not so performance critical.

While we're here, make prefetching work properly by actually prefetching
the page we're about to use before prefetching ahead to the next one (ie.
get the most important transaction started first).  Also prefetch just a
single page ahead rather than leaving a gap of 16.

Jack Steiner reported no problems with SGI's ia64 simulator.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/page_alloc.c	20
1 file changed, 7 insertions, 13 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc65e87368b3..7aa0181287e1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -55,7 +55,6 @@ unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 int percpu_pagelist_fraction;
 
-static void fastcall free_hot_cold_page(struct page *page, int cold);
 static void __free_pages_ok(struct page *page, unsigned int order);
 
 /*
@@ -448,28 +447,23 @@ void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
 	if (order == 0) {
 		__ClearPageReserved(page);
 		set_page_count(page, 0);
-
-		free_hot_cold_page(page, 0);
+		set_page_refs(page, 0);
+		__free_page(page);
 	} else {
-		LIST_HEAD(list);
 		int loop;
 
+		prefetchw(page);
 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
 			struct page *p = &page[loop];
 
-			if (loop + 16 < BITS_PER_LONG)
-				prefetchw(p + 16);
+			if (loop + 1 < BITS_PER_LONG)
+				prefetchw(p + 1);
 			__ClearPageReserved(p);
 			set_page_count(p, 0);
 		}
 
-		arch_free_page(page, order);
-
-		mod_page_state(pgfree, 1 << order);
-
-		list_add(&page->lru, &list);
-		kernel_map_pages(page, 1 << order, 0);
-		free_pages_bulk(page_zone(page), 1, &list, order);
+		set_page_refs(page, order);
+		__free_pages(page, order);
 	}
 }
 
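For context, this is roughly what __free_pages_bootmem() looks like with the patch applied, reconstructed from the hunks above. It is a sketch, not the exact file contents: the surrounding 2.6.16-era helpers (set_page_refs(), prefetchw(), __ClearPageReserved(), __free_page()/__free_pages()) are assumed from that tree.

/*
 * Sketch of mm/page_alloc.c:__free_pages_bootmem() after this patch,
 * reconstructed from the diff above (2.6.16-era helpers assumed).
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refs(page, 0);
		__free_page(page);		/* go through the normal order-0 free path */
	} else {
		int loop;

		/* start the fetch of the page we are about to touch first... */
		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			/* ...then prefetch just one page ahead instead of 16 */
			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refs(page, order);
		__free_pages(page, order);	/* reuse the generic high-order free path */
	}
}

The point of the cleanup is visible here: instead of open-coding the bulk-free path (arch_free_page, mod_page_state, free_pages_bulk), the function now falls through to the ordinary __free_page()/__free_pages() helpers, and the prefetch pattern issues the most important access (the current page) before the lookahead.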