author		Mel Gorman <mel@csn.ul.ie>	2007-10-16 04:25:49 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:59 -0400
commit		535131e6925b4a95f321148ad7293f496e0e58d7 (patch)
tree		fdd49e29f89eb6db3ba2b5ba7df7b059de95a91f /mm/page_alloc.c
parent		b2a0ac8875a0a3b9f0739b60526f8c5977d2200f (diff)
Choose pages from the per-cpu list based on migration type
The freelists for each migrate type can slowly become polluted due to the
per-cpu list.  Consider what happens when the following sequence occurs:

1. A 2^(MAX_ORDER-1) list is reserved for __GFP_MOVABLE pages
2. An order-0 page is allocated from the newly reserved block
3. The page is freed and placed on the per-cpu list
4. alloc_page() is called with GFP_KERNEL as the gfp_mask
5. The per-cpu list is used to satisfy the allocation

This results in a kernel page sitting in the middle of a migratable region.

This patch prevents this leak from occurring by storing the MIGRATE_ type of
the page in page->private.  On allocation, a page is only returned if it is
of the desired type; otherwise more pages are allocated to the per-cpu list.
This may temporarily allow a per-cpu list to go over the pcp->high limit, but
it is corrected on the next free.  Care is taken to preserve the hotness of
pages recently freed.  The additional code is not measurably slower for the
workloads we've tested.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
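As a rough illustration of the mechanism the log describes (tagging each freed page with its migrate type, and only handing it back to a request of the same type), here is a minimal user-space sketch. It is not kernel code: fake_page, pcp_list, pcp_free(), pcp_alloc() and refill_pcp() are simplified stand-ins for struct page, the per-cpu pageset, free_hot_cold_page(), buffered_rmqueue() and rmqueue_bulk(), and the page->private tagging is reduced to a plain struct field.

/*
 * Stand-alone sketch (not kernel code) of the idea in this patch:
 * a page freed to the per-cpu list remembers its migrate type, and an
 * allocation only takes a page whose stored type matches the request,
 * refilling the list with pages of the right type otherwise.
 */
#include <stdio.h>
#include <stdlib.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE };

struct fake_page {
	int migratetype;		/* plays the role of page->private */
	struct fake_page *next;
};

struct pcp_list {
	struct fake_page *head;
	int count;
};

/* Free: remember the page's migrate type before putting it on the list. */
static void pcp_free(struct pcp_list *pcp, struct fake_page *page, int mt)
{
	page->migratetype = mt;		/* the set_page_private() analogue */
	page->next = pcp->head;
	pcp->head = page;
	pcp->count++;
}

/* Hypothetical refill standing in for rmqueue_bulk(): adds 'batch' pages of type 'mt'. */
static void refill_pcp(struct pcp_list *pcp, int mt, int batch)
{
	for (int i = 0; i < batch; i++)
		pcp_free(pcp, malloc(sizeof(struct fake_page)), mt);
}

/* Allocate: only hand back a page whose stored type matches the request. */
static struct fake_page *pcp_alloc(struct pcp_list *pcp, int mt)
{
	for (struct fake_page **link = &pcp->head; *link; link = &(*link)->next) {
		if ((*link)->migratetype == mt) {
			struct fake_page *page = *link;
			*link = page->next;
			pcp->count--;
			return page;
		}
	}
	/* No page of the right type on the list: refill it and retry. */
	refill_pcp(pcp, mt, 4);
	return pcp_alloc(pcp, mt);
}

int main(void)
{
	struct pcp_list pcp = { NULL, 0 };
	struct fake_page movable = { 0, NULL };

	/* A movable page goes back onto the per-cpu list... */
	pcp_free(&pcp, &movable, MIGRATE_MOVABLE);

	/* ...but an unmovable request skips it instead of stealing it. */
	struct fake_page *page = pcp_alloc(&pcp, MIGRATE_UNMOVABLE);
	printf("got migratetype %d, %d pages left on the pcp list\n",
	       page->migratetype, pcp.count);
	return 0;
}

The point this sketch mirrors is the one in the log above: an allocation never takes a page of the wrong type off the per-cpu list; it grows the list instead, which is why pcp->count may briefly exceed pcp->high until the next free corrects it.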
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d54ecf41b44c..e3e726bd2858 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -760,7 +760,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
 			break;
-		list_add_tail(&page->lru, list);
+		list_add(&page->lru, list);
+		set_page_private(page, migratetype);
 	}
 	spin_unlock(&zone->lock);
 	return i;
@@ -887,6 +888,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	local_irq_save(flags);
 	__count_vm_event(PGFREE);
 	list_add(&page->lru, &pcp->list);
+	set_page_private(page, get_pageblock_migratetype(page));
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
@@ -951,9 +953,27 @@ again:
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
-		page = list_entry(pcp->list.next, struct page, lru);
-		list_del(&page->lru);
-		pcp->count--;
+		/* Find a page of the appropriate migrate type */
+		list_for_each_entry(page, &pcp->list, lru) {
+			if (page_private(page) == migratetype) {
+				list_del(&page->lru);
+				pcp->count--;
+				break;
+			}
+		}
+
+		/*
+		 * Check if a page of the appropriate migrate type
+		 * was found. If not, allocate more to the pcp list
+		 */
+		if (&page->lru == &pcp->list) {
+			pcp->count += rmqueue_bulk(zone, 0,
+					pcp->batch, &pcp->list, migratetype);
+			page = list_entry(pcp->list.next, struct page, lru);
+			VM_BUG_ON(page_private(page) != migratetype);
+			list_del(&page->lru);
+			pcp->count--;
+		}
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);