aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMinchan Kim <minchan@kernel.org>2012-10-08 19:32:08 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-09 03:22:45 -0400
commitb12c4ad14ee0232ad47c2bef404b6d42a3578332 (patch)
tree9fc0d3fa799b7aef83f824eb538f0b75c3af0683 /mm/page_alloc.c
parentd95ea5d18e699515468368415c93ed49b1a3221b (diff)
mm: page_alloc: use get_freepage_migratetype() instead of page_private()
The page allocator uses set_page_private and page_private for handling migratetype when it frees page. Let's replace them with [set|get]_freepage_migratetype to make it more clear. Signed-off-by: Minchan Kim <minchan@kernel.org> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Xishi Qiu <qiuxishi@huawei.com> Cc: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c6
1 files changed, 3 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f2c7cc6a3039..6aa0a8e89c5d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -674,7 +674,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
674 page = list_entry(list->prev, struct page, lru); 674 page = list_entry(list->prev, struct page, lru);
675 /* must delete as __free_one_page list manipulates */ 675 /* must delete as __free_one_page list manipulates */
676 list_del(&page->lru); 676 list_del(&page->lru);
677 mt = page_private(page); 677 mt = get_freepage_migratetype(page);
678 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 678 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
679 __free_one_page(page, zone, 0, mt); 679 __free_one_page(page, zone, 0, mt);
680 trace_mm_page_pcpu_drain(page, 0, mt); 680 trace_mm_page_pcpu_drain(page, 0, mt);
@@ -1143,7 +1143,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
1143 if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE) 1143 if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
1144 mt = migratetype; 1144 mt = migratetype;
1145 } 1145 }
1146 set_page_private(page, mt); 1146 set_freepage_migratetype(page, mt);
1147 list = &page->lru; 1147 list = &page->lru;
1148 if (is_migrate_cma(mt)) 1148 if (is_migrate_cma(mt))
1149 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1149 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
@@ -1313,7 +1313,7 @@ void free_hot_cold_page(struct page *page, int cold)
1313 return; 1313 return;
1314 1314
1315 migratetype = get_pageblock_migratetype(page); 1315 migratetype = get_pageblock_migratetype(page);
1316 set_page_private(page, migratetype); 1316 set_freepage_migratetype(page, migratetype);
1317 local_irq_save(flags); 1317 local_irq_save(flags);
1318 if (unlikely(wasMlocked)) 1318 if (unlikely(wasMlocked))
1319 free_page_mlock(page); 1319 free_page_mlock(page);