author	Mel Gorman <mel@csn.ul.ie>	2009-09-21 20:03:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:39 -0400
commit	a6f9edd65beaef24836e8934c8912c1e974dd45c
tree	041c60ed559d3bc1f289d0040e75cfdd78f0acd0 /mm/page_alloc.c
parent	5f8dcc21211a3d4e3a7a5ca366b469fb88117f61
page-allocator: maintain rolling count of pages to free from the PCP
When round-robin freeing pages from the PCP lists, empty lists may be
encountered.  In the event one of the lists has more pages than another,
there may be numerous checks for list_empty(), which is undesirable.  This
patch maintains a count of pages to free which is incremented when empty
lists are encountered.  The intention is that more pages will then be
freed from fuller lists than from the empty ones, reducing the number of
empty-list checks in the free path.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	24
1 file changed, 15 insertions, 9 deletions
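
To make the new behaviour easier to follow, below is a minimal userspace
sketch of the rolling-count drain described in the commit message.  The
names fake_pcp, drain_round_robin() and the MIGRATE_TYPES value of 3 are
hypothetical stand-ins rather than the kernel's own code; the actual
change is in the diff that follows.

/*
 * Minimal userspace sketch of the rolling-count drain.  fake_pcp,
 * drain_round_robin() and MIGRATE_TYPES are illustrative only.
 */
#include <stdio.h>

#define MIGRATE_TYPES 3

struct fake_pcp {
	int lists[MIGRATE_TYPES];	/* pages queued per migratetype */
};

/* Free 'count' pages, taking bigger batches from fuller lists. */
static void drain_round_robin(struct fake_pcp *pcp, int count)
{
	int migratetype = 0;
	int batch_free = 0;

	while (count) {
		/*
		 * Advance to the next non-empty list.  Every empty list
		 * skipped grows batch_free, so the next non-empty list
		 * gives up a larger batch and the empty-list check runs
		 * less often.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_TYPES)
				migratetype = 0;
		} while (pcp->lists[migratetype] == 0);

		/* Free up to batch_free pages from this list. */
		do {
			pcp->lists[migratetype]--;
			printf("freed a page from list %d\n", migratetype);
		} while (--count && --batch_free &&
			 pcp->lists[migratetype] != 0);
	}
}

int main(void)
{
	/* One short list, one long list, one empty list. */
	struct fake_pcp pcp = { .lists = { 1, 6, 0 } };

	/* As in the kernel, callers must not request more pages than are queued. */
	drain_round_robin(&pcp, 7);
	return 0;
}

Note that, as in the patch, batch_free is not reset between outer
iterations: any leftover credit simply rolls over, which is why the
changelog describes it as a rolling count.
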
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b1c39e6a9b8..6877e22e3aa1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -525,32 +525,38 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 					struct per_cpu_pages *pcp)
 {
 	int migratetype = 0;
+	int batch_free = 0;
 
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count--) {
+	while (count) {
 		struct page *page;
 		struct list_head *list;
 
 		/*
-		 * Remove pages from lists in a round-robin fashion. This spinning
-		 * around potentially empty lists is bloody awful, alternatives that
-		 * don't suck are welcome
+		 * Remove pages from lists in a round-robin fashion. A
+		 * batch_free count is maintained that is incremented when an
+		 * empty list is encountered. This is so more pages are freed
+		 * off fuller lists instead of spinning excessively around empty
+		 * lists
 		 */
 		do {
+			batch_free++;
 			if (++migratetype == MIGRATE_PCPTYPES)
 				migratetype = 0;
 			list = &pcp->lists[migratetype];
 		} while (list_empty(list));
 
-		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_one_page list manipulates */
-		list_del(&page->lru);
-		trace_mm_page_pcpu_drain(page, 0, migratetype);
-		__free_one_page(page, zone, 0, migratetype);
+		do {
+			page = list_entry(list->prev, struct page, lru);
+			/* must delete as __free_one_page list manipulates */
+			list_del(&page->lru);
+			__free_one_page(page, zone, 0, migratetype);
+			trace_mm_page_pcpu_drain(page, 0, migratetype);
+		} while (--count && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
 }