author	Christoph Lameter <clameter@sgi.com>	2008-02-05 01:29:19 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:18 -0500
commit	3dfa5721f12c3d5a441448086bee156887daa961 (patch)
tree	8ace8c3f842f8b626b762bb9d2a9b24d8e3bd130 /mm/page_alloc.c
parent	5dc331852848a38ca00a2817e5b98a1d0561b116 (diff)
Page allocator: get rid of the list of cold pages
We have repeatedly discussed whether the cold pages still have a point. There is one way to join the two lists: use a single list, put the cold pages at the end and the hot pages at the beginning. That way a single list can serve both types of allocations.

The discussion of the RFC for this and Mel's measurements indicate that there may not be much of a point left to having separate lists for hot and cold pages (see http://marc.info/?t=119492914200001&r=1&w=2).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Martin Bligh <mbligh@mbligh.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
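The single-list discipline is easiest to see in isolation. Below is a minimal userspace sketch of the idea, not kernel code: the names (fake_page, pcp_list, free_hot, free_cold, alloc_page_sketch) are invented for illustration. Hot frees go to the head, cold frees to the tail, and a cold allocation walks from the tail, mirroring list_add(), list_add_tail() and list_for_each_entry_reverse() in the patch below.

/*
 * Toy model of one per-cpu page list serving both hot and cold
 * requests.  Compile with any C compiler; no kernel headers needed.
 */
#include <stdio.h>

struct fake_page {
	int id;
	struct fake_page *prev, *next;
};

/* Single list with a dummy head node, like a kernel list_head. */
static struct fake_page pcp_list = { .prev = &pcp_list, .next = &pcp_list };

static void list_insert(struct fake_page *new, struct fake_page *prev,
			struct fake_page *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Hot free: the page is likely cache-warm, keep it at the head. */
static void free_hot(struct fake_page *page)
{
	list_insert(page, &pcp_list, pcp_list.next);
}

/* Cold free: push the page to the tail, where cold allocations look. */
static void free_cold(struct fake_page *page)
{
	list_insert(page, pcp_list.prev, &pcp_list);
}

/* Allocation: hot requests take from the head, cold ones from the tail. */
static struct fake_page *alloc_page_sketch(int cold)
{
	struct fake_page *page = cold ? pcp_list.prev : pcp_list.next;

	if (page == &pcp_list)
		return NULL;		/* list is empty */
	page->prev->next = page->next;
	page->next->prev = page->prev;
	return page;
}

int main(void)
{
	struct fake_page pages[4] = { { .id = 0 }, { .id = 1 },
				      { .id = 2 }, { .id = 3 } };

	free_hot(&pages[0]);	/* list: 0            */
	free_cold(&pages[1]);	/* list: 0 1          */
	free_hot(&pages[2]);	/* list: 2 0 1        */
	free_cold(&pages[3]);	/* list: 2 0 1 3      */

	printf("hot alloc  -> page %d\n", alloc_page_sketch(0)->id);	/* 2 */
	printf("cold alloc -> page %d\n", alloc_page_sketch(1)->id);	/* 3 */
	return 0;
}

The real patch gets the same effect with the kernel list helpers, and the allocation-side scan additionally skips pages of the wrong migratetype.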
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	57
1 file changed, 27 insertions(+), 30 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c7de8e959fc..144c0967e702 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -901,24 +901,21 @@ static void drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
 	struct zone *zone;
-	int i;
 
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pset;
+		struct per_cpu_pages *pcp;
 
 		if (!populated_zone(zone))
 			continue;
 
 		pset = zone_pcp(zone, cpu);
-		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-			struct per_cpu_pages *pcp;
-
-			pcp = &pset->pcp[i];
-			local_irq_save(flags);
-			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-			pcp->count = 0;
-			local_irq_restore(flags);
-		}
+
+		pcp = &pset->pcp;
+		local_irq_save(flags);
+		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+		pcp->count = 0;
+		local_irq_restore(flags);
 	}
 }
 
@@ -993,10 +990,13 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);
 
-	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	local_irq_save(flags);
 	__count_vm_event(PGFREE);
-	list_add(&page->lru, &pcp->list);
+	if (cold)
+		list_add_tail(&page->lru, &pcp->list);
+	else
+		list_add(&page->lru, &pcp->list);
 	set_page_private(page, get_pageblock_migratetype(page));
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
@@ -1054,7 +1054,7 @@ again:
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 
-		pcp = &zone_pcp(zone, cpu)->pcp[cold];
+		pcp = &zone_pcp(zone, cpu)->pcp;
 		local_irq_save(flags);
 		if (!pcp->count) {
 			pcp->count = rmqueue_bulk(zone, 0,
@@ -1064,9 +1064,15 @@ again:
 		}
 
 		/* Find a page of the appropriate migrate type */
-		list_for_each_entry(page, &pcp->list, lru)
-			if (page_private(page) == migratetype)
-				break;
+		if (cold) {
+			list_for_each_entry_reverse(page, &pcp->list, lru)
+				if (page_private(page) == migratetype)
+					break;
+		} else {
+			list_for_each_entry(page, &pcp->list, lru)
+				if (page_private(page) == migratetype)
+					break;
+		}
 
 		/* Allocate more to the pcp list if necessary */
 		if (unlikely(&page->lru == &pcp->list)) {
@@ -1793,12 +1799,9 @@ void show_free_areas(void)
 
 			pageset = zone_pcp(zone, cpu);
 
-			printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
-				"Cold: hi:%5d, btch:%4d usd:%4d\n",
-				cpu, pageset->pcp[0].high,
-				pageset->pcp[0].batch, pageset->pcp[0].count,
-				pageset->pcp[1].high, pageset->pcp[1].batch,
-				pageset->pcp[1].count);
+			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
+				cpu, pageset->pcp.high,
+				pageset->pcp.batch, pageset->pcp.count);
 		}
 	}
 
@@ -2596,17 +2599,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 
 	memset(p, 0, sizeof(*p));
 
-	pcp = &p->pcp[0];		/* hot */
+	pcp = &p->pcp;
 	pcp->count = 0;
 	pcp->high = 6 * batch;
 	pcp->batch = max(1UL, 1 * batch);
 	INIT_LIST_HEAD(&pcp->list);
-
-	pcp = &p->pcp[1];		/* cold*/
-	pcp->count = 0;
-	pcp->high = 2 * batch;
-	pcp->batch = max(1UL, batch/2);
-	INIT_LIST_HEAD(&pcp->list);
 }
 
 /*
@@ -2619,7 +2616,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 {
 	struct per_cpu_pages *pcp;
 
-	pcp = &p->pcp[0]; /* hot list */
+	pcp = &p->pcp;
 	pcp->high = high;
 	pcp->batch = max(1UL, high/4);
 	if ((high/4) > (PAGE_SHIFT * 8))