aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 20 ++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc65e87368b3..7aa0181287e1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -55,7 +55,6 @@ unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 int percpu_pagelist_fraction;
 
-static void fastcall free_hot_cold_page(struct page *page, int cold);
 static void __free_pages_ok(struct page *page, unsigned int order);
 
 /*
@@ -448,28 +447,23 @@ void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
 	if (order == 0) {
 		__ClearPageReserved(page);
 		set_page_count(page, 0);
-
-		free_hot_cold_page(page, 0);
+		set_page_refs(page, 0);
+		__free_page(page);
 	} else {
-		LIST_HEAD(list);
 		int loop;
 
+		prefetchw(page);
 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
 			struct page *p = &page[loop];
 
-			if (loop + 16 < BITS_PER_LONG)
-				prefetchw(p + 16);
+			if (loop + 1 < BITS_PER_LONG)
+				prefetchw(p + 1);
 			__ClearPageReserved(p);
 			set_page_count(p, 0);
 		}
 
-		arch_free_page(page, order);
-
-		mod_page_state(pgfree, 1 << order);
-
-		list_add(&page->lru, &list);
-		kernel_map_pages(page, 1 << order, 0);
-		free_pages_bulk(page_zone(page), 1, &list, order);
+		set_page_refs(page, order);
+		__free_pages(page, order);
 	}
 }
 