aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2014-11-13 18:19:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-11-13 19:17:05 -0500
commit8f82b55dd558a74fc33d69a1f2c2605d0cd2c908 (patch)
tree81fccad1e4c19eb9aa121b998cec036da09d17ba /mm
parent51bb1a4093cc68bc16b282548d9cee6104be0ef1 (diff)
mm/page_alloc: move freepage counting logic to __free_one_page()
All the callers of __free_one_page() have similar freepage counting logic, so we can move it to __free_one_page(). This reduces lines of code and helps future maintenance. This is also a preparation step for "mm/page_alloc: restrict max order of merging on isolated pageblock", which fixes the freepage counting problem on freepages with more than pageblock order. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: "Kirill A. Shutemov" <kirill@shutemov.name> Cc: Mel Gorman <mgorman@suse.de> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com> Cc: Tang Chen <tangchen@cn.fujitsu.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Cc: Wen Congyang <wency@cn.fujitsu.com> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Laura Abbott <lauraa@codeaurora.org> Cc: Heesub Shin <heesub.shin@samsung.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> Cc: Ritesh Harjani <ritesh.list@gmail.com> Cc: Gioh Kim <gioh.kim@lge.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c14
1 files changed, 3 insertions, 11 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 58923bea0d8b..9f689f16b5aa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -577,6 +577,8 @@ static inline void __free_one_page(struct page *page,
577 return; 577 return;
578 578
579 VM_BUG_ON(migratetype == -1); 579 VM_BUG_ON(migratetype == -1);
580 if (!is_migrate_isolate(migratetype))
581 __mod_zone_freepage_state(zone, 1 << order, migratetype);
580 582
581 page_idx = pfn & ((1 << MAX_ORDER) - 1); 583 page_idx = pfn & ((1 << MAX_ORDER) - 1);
582 584
@@ -715,14 +717,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
715 /* must delete as __free_one_page list manipulates */ 717 /* must delete as __free_one_page list manipulates */
716 list_del(&page->lru); 718 list_del(&page->lru);
717 mt = get_freepage_migratetype(page); 719 mt = get_freepage_migratetype(page);
718 if (unlikely(has_isolate_pageblock(zone))) { 720 if (unlikely(has_isolate_pageblock(zone)))
719 mt = get_pageblock_migratetype(page); 721 mt = get_pageblock_migratetype(page);
720 if (is_migrate_isolate(mt))
721 goto skip_counting;
722 }
723 __mod_zone_freepage_state(zone, 1, mt);
724 722
725skip_counting:
726 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 723 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
727 __free_one_page(page, page_to_pfn(page), zone, 0, mt); 724 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
728 trace_mm_page_pcpu_drain(page, 0, mt); 725 trace_mm_page_pcpu_drain(page, 0, mt);
@@ -745,12 +742,7 @@ static void free_one_page(struct zone *zone,
745 if (unlikely(has_isolate_pageblock(zone) || 742 if (unlikely(has_isolate_pageblock(zone) ||
746 is_migrate_isolate(migratetype))) { 743 is_migrate_isolate(migratetype))) {
747 migratetype = get_pfnblock_migratetype(page, pfn); 744 migratetype = get_pfnblock_migratetype(page, pfn);
748 if (is_migrate_isolate(migratetype))
749 goto skip_counting;
750 } 745 }
751 __mod_zone_freepage_state(zone, 1 << order, migratetype);
752
753skip_counting:
754 __free_one_page(page, pfn, zone, order, migratetype); 746 __free_one_page(page, pfn, zone, order, migratetype);
755 spin_unlock(&zone->lock); 747 spin_unlock(&zone->lock);
756} 748}