diff options
author | Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | 2012-10-08 19:32:02 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 03:22:44 -0400 |
commit | d1ce749a0db12202b711d1aba1d29e823034648d (patch) | |
tree | b9b1f0e1d4fcda9ab900575f42f5ddc155d28648 | |
parent | 2139cbe627b8910ded55148f87ee10f7485408ed (diff) |
cma: count free CMA pages
Add an NR_FREE_CMA_PAGES counter, to be used later for checking the watermark in
__zone_watermark_ok(). For simplicity, and to avoid #ifdef hell, make this
counter always available (not only when CONFIG_CMA=y).
[akpm@linux-foundation.org: use conventional migratetype naming]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/mmzone.h | 1 | ||||
-rw-r--r-- | include/linux/vmstat.h | 8 | ||||
-rw-r--r-- | mm/page_alloc.c | 26 | ||||
-rw-r--r-- | mm/page_isolation.c | 5 | ||||
-rw-r--r-- | mm/vmstat.c | 1 |
5 files changed, 32 insertions, 9 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 2daa54f55db7..85ac67aa5770 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -142,6 +142,7 @@ enum zone_stat_item { | |||
142 | NUMA_OTHER, /* allocation from other node */ | 142 | NUMA_OTHER, /* allocation from other node */ |
143 | #endif | 143 | #endif |
144 | NR_ANON_TRANSPARENT_HUGEPAGES, | 144 | NR_ANON_TRANSPARENT_HUGEPAGES, |
145 | NR_FREE_CMA_PAGES, | ||
145 | NR_VM_ZONE_STAT_ITEMS }; | 146 | NR_VM_ZONE_STAT_ITEMS }; |
146 | 147 | ||
147 | /* | 148 | /* |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index ad2cfd53dadc..a5bb15018b5c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -253,6 +253,14 @@ static inline void refresh_zone_stat_thresholds(void) { } | |||
253 | 253 | ||
254 | #endif /* CONFIG_SMP */ | 254 | #endif /* CONFIG_SMP */ |
255 | 255 | ||
256 | static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, | ||
257 | int migratetype) | ||
258 | { | ||
259 | __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); | ||
260 | if (is_migrate_cma(migratetype)) | ||
261 | __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); | ||
262 | } | ||
263 | |||
256 | extern const char * const vmstat_text[]; | 264 | extern const char * const vmstat_text[]; |
257 | 265 | ||
258 | #endif /* _LINUX_VMSTAT_H */ | 266 | #endif /* _LINUX_VMSTAT_H */ |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d259cc2b69c1..6969a8abdba2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -558,7 +558,8 @@ static inline void __free_one_page(struct page *page, | |||
558 | if (page_is_guard(buddy)) { | 558 | if (page_is_guard(buddy)) { |
559 | clear_page_guard_flag(buddy); | 559 | clear_page_guard_flag(buddy); |
560 | set_page_private(page, 0); | 560 | set_page_private(page, 0); |
561 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | 561 | __mod_zone_freepage_state(zone, 1 << order, |
562 | migratetype); | ||
562 | } else { | 563 | } else { |
563 | list_del(&buddy->lru); | 564 | list_del(&buddy->lru); |
564 | zone->free_area[order].nr_free--; | 565 | zone->free_area[order].nr_free--; |
@@ -677,6 +678,8 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
677 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ | 678 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ |
678 | __free_one_page(page, zone, 0, mt); | 679 | __free_one_page(page, zone, 0, mt); |
679 | trace_mm_page_pcpu_drain(page, 0, mt); | 680 | trace_mm_page_pcpu_drain(page, 0, mt); |
681 | if (is_migrate_cma(mt)) | ||
682 | __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); | ||
680 | } while (--to_free && --batch_free && !list_empty(list)); | 683 | } while (--to_free && --batch_free && !list_empty(list)); |
681 | } | 684 | } |
682 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | 685 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); |
@@ -692,7 +695,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order, | |||
692 | 695 | ||
693 | __free_one_page(page, zone, order, migratetype); | 696 | __free_one_page(page, zone, order, migratetype); |
694 | if (unlikely(migratetype != MIGRATE_ISOLATE)) | 697 | if (unlikely(migratetype != MIGRATE_ISOLATE)) |
695 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | 698 | __mod_zone_freepage_state(zone, 1 << order, migratetype); |
696 | spin_unlock(&zone->lock); | 699 | spin_unlock(&zone->lock); |
697 | } | 700 | } |
698 | 701 | ||
@@ -815,7 +818,8 @@ static inline void expand(struct zone *zone, struct page *page, | |||
815 | set_page_guard_flag(&page[size]); | 818 | set_page_guard_flag(&page[size]); |
816 | set_page_private(&page[size], high); | 819 | set_page_private(&page[size], high); |
817 | /* Guard pages are not available for any usage */ | 820 | /* Guard pages are not available for any usage */ |
818 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high)); | 821 | __mod_zone_freepage_state(zone, -(1 << high), |
822 | migratetype); | ||
819 | continue; | 823 | continue; |
820 | } | 824 | } |
821 | #endif | 825 | #endif |
@@ -1141,6 +1145,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, | |||
1141 | } | 1145 | } |
1142 | set_page_private(page, mt); | 1146 | set_page_private(page, mt); |
1143 | list = &page->lru; | 1147 | list = &page->lru; |
1148 | if (is_migrate_cma(mt)) | ||
1149 | __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, | ||
1150 | -(1 << order)); | ||
1144 | } | 1151 | } |
1145 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); | 1152 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); |
1146 | spin_unlock(&zone->lock); | 1153 | spin_unlock(&zone->lock); |
@@ -1412,7 +1419,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) | |||
1412 | 1419 | ||
1413 | mt = get_pageblock_migratetype(page); | 1420 | mt = get_pageblock_migratetype(page); |
1414 | if (unlikely(mt != MIGRATE_ISOLATE)) | 1421 | if (unlikely(mt != MIGRATE_ISOLATE)) |
1415 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); | 1422 | __mod_zone_freepage_state(zone, -(1UL << order), mt); |
1416 | 1423 | ||
1417 | if (alloc_order != order) | 1424 | if (alloc_order != order) |
1418 | expand(zone, page, alloc_order, order, | 1425 | expand(zone, page, alloc_order, order, |
@@ -1516,7 +1523,8 @@ again: | |||
1516 | spin_unlock(&zone->lock); | 1523 | spin_unlock(&zone->lock); |
1517 | if (!page) | 1524 | if (!page) |
1518 | goto failed; | 1525 | goto failed; |
1519 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); | 1526 | __mod_zone_freepage_state(zone, -(1 << order), |
1527 | get_pageblock_migratetype(page)); | ||
1520 | } | 1528 | } |
1521 | 1529 | ||
1522 | __count_zone_vm_events(PGALLOC, zone, 1 << order); | 1530 | __count_zone_vm_events(PGALLOC, zone, 1 << order); |
@@ -2890,7 +2898,8 @@ void show_free_areas(unsigned int filter) | |||
2890 | " unevictable:%lu" | 2898 | " unevictable:%lu" |
2891 | " dirty:%lu writeback:%lu unstable:%lu\n" | 2899 | " dirty:%lu writeback:%lu unstable:%lu\n" |
2892 | " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" | 2900 | " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" |
2893 | " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", | 2901 | " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" |
2902 | " free_cma:%lu\n", | ||
2894 | global_page_state(NR_ACTIVE_ANON), | 2903 | global_page_state(NR_ACTIVE_ANON), |
2895 | global_page_state(NR_INACTIVE_ANON), | 2904 | global_page_state(NR_INACTIVE_ANON), |
2896 | global_page_state(NR_ISOLATED_ANON), | 2905 | global_page_state(NR_ISOLATED_ANON), |
@@ -2907,7 +2916,8 @@ void show_free_areas(unsigned int filter) | |||
2907 | global_page_state(NR_FILE_MAPPED), | 2916 | global_page_state(NR_FILE_MAPPED), |
2908 | global_page_state(NR_SHMEM), | 2917 | global_page_state(NR_SHMEM), |
2909 | global_page_state(NR_PAGETABLE), | 2918 | global_page_state(NR_PAGETABLE), |
2910 | global_page_state(NR_BOUNCE)); | 2919 | global_page_state(NR_BOUNCE), |
2920 | global_page_state(NR_FREE_CMA_PAGES)); | ||
2911 | 2921 | ||
2912 | for_each_populated_zone(zone) { | 2922 | for_each_populated_zone(zone) { |
2913 | int i; | 2923 | int i; |
@@ -2939,6 +2949,7 @@ void show_free_areas(unsigned int filter) | |||
2939 | " pagetables:%lukB" | 2949 | " pagetables:%lukB" |
2940 | " unstable:%lukB" | 2950 | " unstable:%lukB" |
2941 | " bounce:%lukB" | 2951 | " bounce:%lukB" |
2952 | " free_cma:%lukB" | ||
2942 | " writeback_tmp:%lukB" | 2953 | " writeback_tmp:%lukB" |
2943 | " pages_scanned:%lu" | 2954 | " pages_scanned:%lu" |
2944 | " all_unreclaimable? %s" | 2955 | " all_unreclaimable? %s" |
@@ -2968,6 +2979,7 @@ void show_free_areas(unsigned int filter) | |||
2968 | K(zone_page_state(zone, NR_PAGETABLE)), | 2979 | K(zone_page_state(zone, NR_PAGETABLE)), |
2969 | K(zone_page_state(zone, NR_UNSTABLE_NFS)), | 2980 | K(zone_page_state(zone, NR_UNSTABLE_NFS)), |
2970 | K(zone_page_state(zone, NR_BOUNCE)), | 2981 | K(zone_page_state(zone, NR_BOUNCE)), |
2982 | K(zone_page_state(zone, NR_FREE_CMA_PAGES)), | ||
2971 | K(zone_page_state(zone, NR_WRITEBACK_TEMP)), | 2983 | K(zone_page_state(zone, NR_WRITEBACK_TEMP)), |
2972 | zone->pages_scanned, | 2984 | zone->pages_scanned, |
2973 | (zone->all_unreclaimable ? "yes" : "no") | 2985 | (zone->all_unreclaimable ? "yes" : "no") |
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 3ca1716471bc..345643b85bd4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -77,11 +77,12 @@ int set_migratetype_isolate(struct page *page) | |||
77 | out: | 77 | out: |
78 | if (!ret) { | 78 | if (!ret) { |
79 | unsigned long nr_pages; | 79 | unsigned long nr_pages; |
80 | int migratetype = get_pageblock_migratetype(page); | ||
80 | 81 | ||
81 | set_pageblock_isolate(page); | 82 | set_pageblock_isolate(page); |
82 | nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); | 83 | nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); |
83 | 84 | ||
84 | __mod_zone_page_state(zone, NR_FREE_PAGES, -nr_pages); | 85 | __mod_zone_freepage_state(zone, -nr_pages, migratetype); |
85 | } | 86 | } |
86 | 87 | ||
87 | spin_unlock_irqrestore(&zone->lock, flags); | 88 | spin_unlock_irqrestore(&zone->lock, flags); |
@@ -100,7 +101,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) | |||
100 | if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) | 101 | if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) |
101 | goto out; | 102 | goto out; |
102 | nr_pages = move_freepages_block(zone, page, migratetype); | 103 | nr_pages = move_freepages_block(zone, page, migratetype); |
103 | __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); | 104 | __mod_zone_freepage_state(zone, nr_pages, migratetype); |
104 | restore_pageblock_isolate(page, migratetype); | 105 | restore_pageblock_isolate(page, migratetype); |
105 | out: | 106 | out: |
106 | spin_unlock_irqrestore(&zone->lock, flags); | 107 | spin_unlock_irqrestore(&zone->lock, flags); |
diff --git a/mm/vmstat.c b/mm/vmstat.c index b3e3b9d525d0..acbd85c983e6 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -722,6 +722,7 @@ const char * const vmstat_text[] = { | |||
722 | "numa_other", | 722 | "numa_other", |
723 | #endif | 723 | #endif |
724 | "nr_anon_transparent_hugepages", | 724 | "nr_anon_transparent_hugepages", |
725 | "nr_free_cma", | ||
725 | "nr_dirty_threshold", | 726 | "nr_dirty_threshold", |
726 | "nr_dirty_background_threshold", | 727 | "nr_dirty_background_threshold", |
727 | 728 | ||