about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorBartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>2012-10-08 19:32:02 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-09 03:22:44 -0400
commitd1ce749a0db12202b711d1aba1d29e823034648d (patch)
treeb9b1f0e1d4fcda9ab900575f42f5ddc155d28648 /mm/page_alloc.c
parent2139cbe627b8910ded55148f87ee10f7485408ed (diff)
cma: count free CMA pages
Add NR_FREE_CMA_PAGES counter to be later used for checking watermark in __zone_watermark_ok().  For simplicity and to avoid #ifdef hell make this counter always available (not only when CONFIG_CMA=y).

[akpm@linux-foundation.org: use conventional migratetype naming]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d259cc2b69c..6969a8abdba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -558,7 +558,8 @@ static inline void __free_one_page(struct page *page,
558 if (page_is_guard(buddy)) { 558 if (page_is_guard(buddy)) {
559 clear_page_guard_flag(buddy); 559 clear_page_guard_flag(buddy);
560 set_page_private(page, 0); 560 set_page_private(page, 0);
561 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); 561 __mod_zone_freepage_state(zone, 1 << order,
562 migratetype);
562 } else { 563 } else {
563 list_del(&buddy->lru); 564 list_del(&buddy->lru);
564 zone->free_area[order].nr_free--; 565 zone->free_area[order].nr_free--;
@@ -677,6 +678,8 @@ static void free_pcppages_bulk(struct zone *zone, int count,
677 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 678 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
678 __free_one_page(page, zone, 0, mt); 679 __free_one_page(page, zone, 0, mt);
679 trace_mm_page_pcpu_drain(page, 0, mt); 680 trace_mm_page_pcpu_drain(page, 0, mt);
681 if (is_migrate_cma(mt))
682 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
680 } while (--to_free && --batch_free && !list_empty(list)); 683 } while (--to_free && --batch_free && !list_empty(list));
681 } 684 }
682 __mod_zone_page_state(zone, NR_FREE_PAGES, count); 685 __mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -692,7 +695,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
692 695
693 __free_one_page(page, zone, order, migratetype); 696 __free_one_page(page, zone, order, migratetype);
694 if (unlikely(migratetype != MIGRATE_ISOLATE)) 697 if (unlikely(migratetype != MIGRATE_ISOLATE))
695 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); 698 __mod_zone_freepage_state(zone, 1 << order, migratetype);
696 spin_unlock(&zone->lock); 699 spin_unlock(&zone->lock);
697} 700}
698 701
@@ -815,7 +818,8 @@ static inline void expand(struct zone *zone, struct page *page,
815 set_page_guard_flag(&page[size]); 818 set_page_guard_flag(&page[size]);
816 set_page_private(&page[size], high); 819 set_page_private(&page[size], high);
817 /* Guard pages are not available for any usage */ 820 /* Guard pages are not available for any usage */
818 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high)); 821 __mod_zone_freepage_state(zone, -(1 << high),
822 migratetype);
819 continue; 823 continue;
820 } 824 }
821#endif 825#endif
@@ -1141,6 +1145,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
1141 } 1145 }
1142 set_page_private(page, mt); 1146 set_page_private(page, mt);
1143 list = &page->lru; 1147 list = &page->lru;
1148 if (is_migrate_cma(mt))
1149 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1150 -(1 << order));
1144 } 1151 }
1145 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 1152 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1146 spin_unlock(&zone->lock); 1153 spin_unlock(&zone->lock);
@@ -1412,7 +1419,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
1412 1419
1413 mt = get_pageblock_migratetype(page); 1420 mt = get_pageblock_migratetype(page);
1414 if (unlikely(mt != MIGRATE_ISOLATE)) 1421 if (unlikely(mt != MIGRATE_ISOLATE))
1415 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); 1422 __mod_zone_freepage_state(zone, -(1UL << order), mt);
1416 1423
1417 if (alloc_order != order) 1424 if (alloc_order != order)
1418 expand(zone, page, alloc_order, order, 1425 expand(zone, page, alloc_order, order,
@@ -1516,7 +1523,8 @@ again:
1516 spin_unlock(&zone->lock); 1523 spin_unlock(&zone->lock);
1517 if (!page) 1524 if (!page)
1518 goto failed; 1525 goto failed;
1519 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); 1526 __mod_zone_freepage_state(zone, -(1 << order),
1527 get_pageblock_migratetype(page));
1520 } 1528 }
1521 1529
1522 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1530 __count_zone_vm_events(PGALLOC, zone, 1 << order);
@@ -2890,7 +2898,8 @@ void show_free_areas(unsigned int filter)
2890 " unevictable:%lu" 2898 " unevictable:%lu"
2891 " dirty:%lu writeback:%lu unstable:%lu\n" 2899 " dirty:%lu writeback:%lu unstable:%lu\n"
2892 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" 2900 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2893 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", 2901 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
2902 " free_cma:%lu\n",
2894 global_page_state(NR_ACTIVE_ANON), 2903 global_page_state(NR_ACTIVE_ANON),
2895 global_page_state(NR_INACTIVE_ANON), 2904 global_page_state(NR_INACTIVE_ANON),
2896 global_page_state(NR_ISOLATED_ANON), 2905 global_page_state(NR_ISOLATED_ANON),
@@ -2907,7 +2916,8 @@ void show_free_areas(unsigned int filter)
2907 global_page_state(NR_FILE_MAPPED), 2916 global_page_state(NR_FILE_MAPPED),
2908 global_page_state(NR_SHMEM), 2917 global_page_state(NR_SHMEM),
2909 global_page_state(NR_PAGETABLE), 2918 global_page_state(NR_PAGETABLE),
2910 global_page_state(NR_BOUNCE)); 2919 global_page_state(NR_BOUNCE),
2920 global_page_state(NR_FREE_CMA_PAGES));
2911 2921
2912 for_each_populated_zone(zone) { 2922 for_each_populated_zone(zone) {
2913 int i; 2923 int i;
@@ -2939,6 +2949,7 @@ void show_free_areas(unsigned int filter)
2939 " pagetables:%lukB" 2949 " pagetables:%lukB"
2940 " unstable:%lukB" 2950 " unstable:%lukB"
2941 " bounce:%lukB" 2951 " bounce:%lukB"
2952 " free_cma:%lukB"
2942 " writeback_tmp:%lukB" 2953 " writeback_tmp:%lukB"
2943 " pages_scanned:%lu" 2954 " pages_scanned:%lu"
2944 " all_unreclaimable? %s" 2955 " all_unreclaimable? %s"
@@ -2968,6 +2979,7 @@ void show_free_areas(unsigned int filter)
2968 K(zone_page_state(zone, NR_PAGETABLE)), 2979 K(zone_page_state(zone, NR_PAGETABLE)),
2969 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 2980 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2970 K(zone_page_state(zone, NR_BOUNCE)), 2981 K(zone_page_state(zone, NR_BOUNCE)),
2982 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
2971 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 2983 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2972 zone->pages_scanned, 2984 zone->pages_scanned,
2973 (zone->all_unreclaimable ? "yes" : "no") 2985 (zone->all_unreclaimable ? "yes" : "no")