summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
author	Vlastimil Babka <vbabka@suse.cz>	2015-09-08 18:01:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 18:35:28 -0400
commit	bb14c2c75db972a1bf65fd63c8d5a0b41a8f263a (patch)
tree	851c3ad6a765ed0d4ecb171f86764e8e7cdf6405 /mm/page_alloc.c
parent	aa016d145d4c3b8a7273429528f19d5b423ddbc7 (diff)
mm: rename and move get/set_freepage_migratetype
The pair of get/set_freepage_migratetype() functions are used to cache pageblock migratetype for a page put on a pcplist, so that it does not have to be retrieved again when the page is put on a free list (e.g. when pcplists become full). Historically it was also assumed that the value is accurate for pages on freelists (as the functions' names unfortunately suggest), but that cannot be guaranteed without affecting various allocator fast paths. It is in fact not needed and all such uses have been removed. The last remaining (but pointless) usage related to pages of freelists is in move_freepages(), which this patch removes. To prevent further confusion, rename the functions to get/set_pcppage_migratetype() and expand their description. Since all the users are now in mm/page_alloc.c, move the functions there from the shared header. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: David Rientjes <rientjes@google.com> Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Minchan Kim <minchan@kernel.org> Acked-by: Michal Nazarewicz <mina86@mina86.com> Cc: Laura Abbott <lauraa@codeaurora.org> Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Seungho Park <seungho1.park@lge.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Acked-by: Mel Gorman <mgorman@techsingularity.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	| 41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a329cfaf634d..252665d553b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -125,6 +125,24 @@ unsigned long dirty_balance_reserve __read_mostly;
125int percpu_pagelist_fraction; 125int percpu_pagelist_fraction;
126gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 126gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
127 127
128/*
129 * A cached value of the page's pageblock's migratetype, used when the page is
130 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
131 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
132 * Also the migratetype set in the page does not necessarily match the pcplist
133 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
134 * other index - this ensures that it will be put on the correct CMA freelist.
135 */
136static inline int get_pcppage_migratetype(struct page *page)
137{
138 return page->index;
139}
140
141static inline void set_pcppage_migratetype(struct page *page, int migratetype)
142{
143 page->index = migratetype;
144}
145
128#ifdef CONFIG_PM_SLEEP 146#ifdef CONFIG_PM_SLEEP
129/* 147/*
130 * The following functions are used by the suspend/hibernate code to temporarily 148 * The following functions are used by the suspend/hibernate code to temporarily
@@ -789,7 +807,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
789 /* must delete as __free_one_page list manipulates */ 807 /* must delete as __free_one_page list manipulates */
790 list_del(&page->lru); 808 list_del(&page->lru);
791 809
792 mt = get_freepage_migratetype(page); 810 mt = get_pcppage_migratetype(page);
793 /* MIGRATE_ISOLATE page should not go to pcplists */ 811 /* MIGRATE_ISOLATE page should not go to pcplists */
794 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); 812 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
795 /* Pageblock could have been isolated meanwhile */ 813 /* Pageblock could have been isolated meanwhile */
@@ -956,7 +974,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
956 migratetype = get_pfnblock_migratetype(page, pfn); 974 migratetype = get_pfnblock_migratetype(page, pfn);
957 local_irq_save(flags); 975 local_irq_save(flags);
958 __count_vm_events(PGFREE, 1 << order); 976 __count_vm_events(PGFREE, 1 << order);
959 set_freepage_migratetype(page, migratetype);
960 free_one_page(page_zone(page), page, pfn, order, migratetype); 977 free_one_page(page_zone(page), page, pfn, order, migratetype);
961 local_irq_restore(flags); 978 local_irq_restore(flags);
962} 979}
@@ -1384,7 +1401,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1384 rmv_page_order(page); 1401 rmv_page_order(page);
1385 area->nr_free--; 1402 area->nr_free--;
1386 expand(zone, page, order, current_order, area, migratetype); 1403 expand(zone, page, order, current_order, area, migratetype);
1387 set_freepage_migratetype(page, migratetype); 1404 set_pcppage_migratetype(page, migratetype);
1388 return page; 1405 return page;
1389 } 1406 }
1390 1407
@@ -1461,7 +1478,6 @@ int move_freepages(struct zone *zone,
1461 order = page_order(page); 1478 order = page_order(page);
1462 list_move(&page->lru, 1479 list_move(&page->lru,
1463 &zone->free_area[order].free_list[migratetype]); 1480 &zone->free_area[order].free_list[migratetype]);
1464 set_freepage_migratetype(page, migratetype);
1465 page += 1 << order; 1481 page += 1 << order;
1466 pages_moved += 1 << order; 1482 pages_moved += 1 << order;
1467 } 1483 }
@@ -1631,14 +1647,13 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
1631 expand(zone, page, order, current_order, area, 1647 expand(zone, page, order, current_order, area,
1632 start_migratetype); 1648 start_migratetype);
1633 /* 1649 /*
1634 * The freepage_migratetype may differ from pageblock's 1650 * The pcppage_migratetype may differ from pageblock's
1635 * migratetype depending on the decisions in 1651 * migratetype depending on the decisions in
1636 * try_to_steal_freepages(). This is OK as long as it 1652 * find_suitable_fallback(). This is OK as long as it does not
1637 * does not differ for MIGRATE_CMA pageblocks. For CMA 1653 * differ for MIGRATE_CMA pageblocks. Those can be used as
1638 * we need to make sure unallocated pages flushed from 1654 * fallback only via special __rmqueue_cma_fallback() function
1639 * pcp lists are returned to the correct freelist.
1640 */ 1655 */
1641 set_freepage_migratetype(page, start_migratetype); 1656 set_pcppage_migratetype(page, start_migratetype);
1642 1657
1643 trace_mm_page_alloc_extfrag(page, order, current_order, 1658 trace_mm_page_alloc_extfrag(page, order, current_order,
1644 start_migratetype, fallback_mt); 1659 start_migratetype, fallback_mt);
@@ -1714,7 +1729,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
1714 else 1729 else
1715 list_add_tail(&page->lru, list); 1730 list_add_tail(&page->lru, list);
1716 list = &page->lru; 1731 list = &page->lru;
1717 if (is_migrate_cma(get_freepage_migratetype(page))) 1732 if (is_migrate_cma(get_pcppage_migratetype(page)))
1718 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1733 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1719 -(1 << order)); 1734 -(1 << order));
1720 } 1735 }
@@ -1911,7 +1926,7 @@ void free_hot_cold_page(struct page *page, bool cold)
1911 return; 1926 return;
1912 1927
1913 migratetype = get_pfnblock_migratetype(page, pfn); 1928 migratetype = get_pfnblock_migratetype(page, pfn);
1914 set_freepage_migratetype(page, migratetype); 1929 set_pcppage_migratetype(page, migratetype);
1915 local_irq_save(flags); 1930 local_irq_save(flags);
1916 __count_vm_event(PGFREE); 1931 __count_vm_event(PGFREE);
1917 1932
@@ -2116,7 +2131,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
2116 if (!page) 2131 if (!page)
2117 goto failed; 2132 goto failed;
2118 __mod_zone_freepage_state(zone, -(1 << order), 2133 __mod_zone_freepage_state(zone, -(1 << order),
2119 get_freepage_migratetype(page)); 2134 get_pcppage_migratetype(page));
2120 } 2135 }
2121 2136
2122 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 2137 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));