author	Mel Gorman <mel@csn.ul.ie>	2009-06-16 18:32:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:34 -0400
commit	ed0ae21dc5fe3b9ad4cf1c7bb2bfd2ad596c481c (patch)
tree	1ccdf36012a6a8ed22c5a78f7093bae0a259274e
parent	0ac3a4099b0171ff965836182bc688bb8ca01058 (diff)
page allocator: do not call get_pageblock_migratetype() more than necessary
get_pageblock_migratetype() is potentially called twice for every page free: once when the page is freed to the pcp lists, and once when it is freed back to the buddy allocator. When freeing from the pcp lists, the pageblock type at the time of the free is already known, so pass it along rather than rechecking. In low memory situations under memory pressure this might skew anti-fragmentation slightly, but the interference is minimal and the decisions that fragment memory are being made anyway.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
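For illustration, a minimal stand-alone sketch of the idea follows (not the kernel source; the struct layout, list handling, and stub bodies are simplified stand-ins): the migratetype is looked up once when a page is queued on the per-cpu (pcp) free list, cached in the page's private field, and handed straight to __free_one_page() when the list is drained, instead of calling get_pageblock_migratetype() a second time.

	/*
	 * Simplified sketch of the pattern, not the kernel code: compute
	 * the migratetype once at pcp-queue time, cache it in page->private,
	 * and reuse it when the list is drained into the buddy allocator.
	 */
	#include <stdio.h>

	enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

	struct page {
		unsigned long private;	/* caches the migratetype while on the pcp list */
		struct page *next;	/* stand-in for the lru list linkage */
	};

	/* Stand-in for the real lookup, which reads the pageblock bitmap. */
	static enum migratetype get_pageblock_migratetype(struct page *page)
	{
		(void)page;
		return MIGRATE_MOVABLE;
	}

	/* Free into the buddy allocator; after the patch the migratetype is
	 * passed in as an argument rather than recomputed here. */
	static void __free_one_page(struct page *page, enum migratetype mt)
	{
		printf("freeing page %p as migratetype %d\n", (void *)page, mt);
	}

	/* Queue onto the pcp list: look the type up once and cache it. */
	static void pcp_list_add(struct page **list, struct page *page)
	{
		page->private = get_pageblock_migratetype(page);
		page->next = *list;
		*list = page;
	}

	/* Drain the pcp list: reuse the cached value, no second lookup. */
	static void pcp_list_drain(struct page **list)
	{
		while (*list) {
			struct page *page = *list;

			*list = page->next;
			__free_one_page(page, (enum migratetype)page->private);
		}
	}

	int main(void)
	{
		struct page pages[2] = { {0}, {0} };
		struct page *pcp = NULL;

		pcp_list_add(&pcp, &pages[0]);
		pcp_list_add(&pcp, &pages[1]);
		pcp_list_drain(&pcp);
		return 0;
	}

In the actual patch this corresponds to free_pages_bulk() passing page_private(page) down to __free_one_page(), as in the hunks below, while the path that has no cached value (__free_pages_ok()) still performs the get_pageblock_migratetype() lookup itself.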
 mm/page_alloc.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 91e29b3ed2b6..8f334d339b08 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -452,16 +452,18 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  */
 
 static inline void __free_one_page(struct page *page,
-		struct zone *zone, unsigned int order)
+		struct zone *zone, unsigned int order,
+		int migratetype)
 {
 	unsigned long page_idx;
 	int order_size = 1 << order;
-	int migratetype = get_pageblock_migratetype(page);
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
 			return;
 
+	VM_BUG_ON(migratetype == -1);
+
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
 	VM_BUG_ON(page_idx & (order_size - 1));
@@ -530,17 +532,18 @@ static void free_pages_bulk(struct zone *zone, int count,
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		__free_one_page(page, zone, order);
+		__free_one_page(page, zone, order, page_private(page));
 	}
 	spin_unlock(&zone->lock);
 }
 
-static void free_one_page(struct zone *zone, struct page *page, int order)
+static void free_one_page(struct zone *zone, struct page *page, int order,
+				int migratetype)
 {
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
-	__free_one_page(page, zone, order);
+	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
 
@@ -565,7 +568,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
-	free_one_page(page_zone(page), page, order);
+	free_one_page(page_zone(page), page, order,
+					get_pageblock_migratetype(page));
 	local_irq_restore(flags);
 }
 