author     Mel Gorman <mel@csn.ul.ie>  2009-06-16 18:32:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:47:35 -0400
commit     f2260e6b1f4eba0f5b5906795117791b5c660154
tree       0faa8ce5fb0875835142e6ff3928b2ce076b4874
parent     418589663d6011de9006425b6c5721e1544fb47a
page allocator: update NR_FREE_PAGES only as necessary
When pages are being freed to the buddy allocator, the zone NR_FREE_PAGES counter must be updated. In the case of bulk per-cpu page freeing, it's updated once per page. This retouches cache lines more than necessary. Update the counter once per per-cpu bulk free instead.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
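To make the accounting change concrete, here is a minimal user-space sketch of the same batching pattern. It is not the kernel code; zone_stats, bulk_free_per_page and bulk_free_batched are hypothetical names. The point is only that accumulating the total and applying it once replaces count separate writes to a shared counter.

/*
 * Sketch of the batching idea in this patch (illustrative names,
 * not the kernel's). The old pattern dirties the shared counter's
 * cache line once per page; the new pattern does it once per bulk free.
 */
#include <stdio.h>

struct zone_stats {
	long nr_free_pages;	/* stand-in for the shared NR_FREE_PAGES counter */
};

/* Old pattern: the shared counter is retouched once per freed page. */
static void bulk_free_per_page(struct zone_stats *z, int count, int order)
{
	for (int i = 0; i < count; i++)
		z->nr_free_pages += 1L << order;	/* 'count' writes to shared state */
}

/* New pattern: one counter update covers the whole bulk free. */
static void bulk_free_batched(struct zone_stats *z, int count, int order)
{
	/* ... pages would be returned to the buddy lists here ... */
	z->nr_free_pages += (long)count << order;	/* single write */
}

int main(void)
{
	struct zone_stats a = { 0 }, b = { 0 };

	bulk_free_per_page(&a, 32, 0);	/* 32 order-0 pages */
	bulk_free_batched(&b, 32, 0);

	/* Both paths account for the same number of pages. */
	printf("per-page: %ld, batched: %ld\n", a.nr_free_pages, b.nr_free_pages);
	return 0;
}

Both calls leave the counter at 32; the difference is only how many times the shared cache line is written, which is what the patch below avoids in free_pages_bulk().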
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  13
1 file changed, 7 insertions, 6 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index abe26003124d..d56e377ad085 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -456,7 +456,6 @@ static inline void __free_one_page(struct page *page,
 		int migratetype)
 {
 	unsigned long page_idx;
-	int order_size = 1 << order;
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
@@ -466,10 +465,9 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	VM_BUG_ON(page_idx & (order_size - 1));
+	VM_BUG_ON(page_idx & ((1 << order) - 1));
 	VM_BUG_ON(bad_range(zone, page));
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
 	while (order < MAX_ORDER-1) {
 		unsigned long combined_idx;
 		struct page *buddy;
@@ -524,6 +522,8 @@ static void free_pages_bulk(struct zone *zone, int count,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
 	while (count--) {
 		struct page *page;
 
@@ -542,6 +542,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
@@ -686,7 +688,6 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
-		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
 		expand(zone, page, order, current_order, area, migratetype);
 		return page;
 	}
@@ -826,8 +827,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
 			rmv_page_order(page);
-			__mod_zone_page_state(zone, NR_FREE_PAGES,
-							-(1UL << order));
 
 			if (current_order == pageblock_order)
 				set_pageblock_migratetype(page,
@@ -900,6 +899,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
 	return i;
 }
@@ -1129,6 +1129,7 @@ again:
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;