author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2009-09-21 20:01:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2009-09-22 10:17:29 -0400
commit     a731286de62294b63d8ceb3c5914ac52cc17e690
tree       c321e14500ec264e37fd103ffa71c7b133088010 /mm
parent     b35ea17b7bbf5dea35faa0de11030acc620c3197
mm: vmstat: add isolate pages
If the system is running a heavy load of processes, concurrent reclaim
can isolate a large number of pages from the LRU. /proc/vmstat and the
output generated for an OOM do not show how many pages were isolated.
This has been observed during process fork bomb testing (mstctl11 in LTP).
This patch adds isolated-page counts to /proc/vmstat and to the OOM report.
Reproduced via:
-----------------------
% ./hackbench 140 process 1000
=> OOM occurs
active_anon:146 inactive_anon:0 isolated_anon:49245
active_file:79 inactive_file:18 isolated_file:113
unevictable:0 dirty:0 writeback:0 unstable:0 buffer:39
free:370 slab_reclaimable:309 slab_unreclaimable:5492
mapped:53 shmem:15 pagetables:28140 bounce:0
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
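
The isolated_anon/isolated_file figures in the report above come from two new per-zone counters, NR_ISOLATED_ANON and NR_ISOLATED_FILE, which the hunks below maintain. Several of them use the offset idiom NR_ISOLATED_ANON + !!page_is_file_cache(page), which only works if the file counter immediately follows the anon counter. The following is a minimal userspace model of that idiom, not kernel code; the enum layout is an assumption mirroring the order seen in vmstat_text further down.

/*
 * Minimal userspace model (not kernel code) of the counter-indexing idiom
 * used by the patch, e.g. dec_zone_page_state(page, NR_ISOLATED_ANON +
 * !!page_is_file_cache(page)).  It relies on the file counter sitting
 * directly after the anon counter in the enum.
 */
#include <stdio.h>

enum stat_item {
        NR_ISOLATED_ANON,       /* anon pages isolated from the LRU */
        NR_ISOLATED_FILE,       /* must be NR_ISOLATED_ANON + 1 for the idiom */
        NR_STAT_ITEMS,
};

static long vm_stat[NR_STAT_ITEMS];

struct page { int is_file_cache; };

static int page_is_file_cache(const struct page *page)
{
        return page->is_file_cache;
}

static void mod_state(enum stat_item item, long delta)
{
        vm_stat[item] += delta;
}

int main(void)
{
        struct page anon = { 0 }, file = { 1 };

        /* isolate one page of each kind: the offset picks the right counter */
        mod_state(NR_ISOLATED_ANON + !!page_is_file_cache(&anon), 1);
        mod_state(NR_ISOLATED_ANON + !!page_is_file_cache(&file), 1);

        printf("isolated_anon=%ld isolated_file=%ld\n",
               vm_stat[NR_ISOLATED_ANON], vm_stat[NR_ISOLATED_FILE]);
        return 0;
}

The !! collapses any non-zero "this is page cache" answer to exactly 1, so the expression always lands on one of the two adjacent counters.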
Diffstat (limited to 'mm')
-rw-r--r--   mm/migrate.c    | 11
-rw-r--r--   mm/page_alloc.c | 12
-rw-r--r--   mm/vmscan.c     | 12
-rw-r--r--   mm/vmstat.c     |  2
4 files changed, 33 insertions, 4 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 37143b924484..b535a2c1656c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -67,6 +67,8 @@ int putback_lru_pages(struct list_head *l)
 
         list_for_each_entry_safe(page, page2, l, lru) {
                 list_del(&page->lru);
+                dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                !!page_is_file_cache(page));
                 putback_lru_page(page);
                 count++;
         }
@@ -698,6 +700,8 @@ unlock:
                  * restored.
                  */
                 list_del(&page->lru);
+                dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                !!page_is_file_cache(page));
                 putback_lru_page(page);
         }
 
@@ -742,6 +746,13 @@ int migrate_pages(struct list_head *from,
         struct page *page2;
         int swapwrite = current->flags & PF_SWAPWRITE;
         int rc;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        list_for_each_entry(page, from, lru)
+                __inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                !!page_is_file_cache(page));
+        local_irq_restore(flags);
 
         if (!swapwrite)
                 current->flags |= PF_SWAPWRITE;
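
In mm/migrate.c, the pages handed to migrate_pages() were already isolated by the caller, so the function charges NR_ISOLATED_* for each of them up front (under local_irq_save(), because the __-prefixed stat helpers assume interrupts are already disabled), and every path that returns a page to the LRU drops the charge again. Below is a rough userspace model of that charge/uncharge pairing; the function bodies are stand-ins, not the real helpers.

/*
 * Userspace sketch of the mm/migrate.c accounting pattern: charge the
 * isolated counters for every incoming page, uncharge as each page goes
 * back to the LRU, so the counters return to zero when migration is done.
 */
#include <stdio.h>

enum { NR_ISOLATED_ANON, NR_ISOLATED_FILE, NR_ITEMS };
static long vm_stat[NR_ITEMS];

struct page { int is_file; };

static void account_isolated(const struct page *p, long delta)
{
        vm_stat[NR_ISOLATED_ANON + !!p->is_file] += delta;
}

/* stand-in for migrate_pages(): charge on entry, uncharge per page on exit */
static void migrate_pages_model(struct page *pages, int n)
{
        for (int i = 0; i < n; i++)
                account_isolated(&pages[i], +1);        /* local_irq_save() loop */

        for (int i = 0; i < n; i++)
                account_isolated(&pages[i], -1);        /* putback / unwind path */
}

int main(void)
{
        struct page batch[3] = { {0}, {1}, {1} };

        migrate_pages_model(batch, 3);
        printf("after migration: anon=%ld file=%ld (both back to zero)\n",
               vm_stat[NR_ISOLATED_ANON], vm_stat[NR_ISOLATED_FILE]);
        return 0;
}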
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b6d0d09557ef..afda8fd16484 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2134,16 +2134,18 @@ void show_free_areas(void)
                 }
         }
 
-        printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
-                " inactive_file:%lu"
+        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
                 " unevictable:%lu"
                 " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
                 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
                 global_page_state(NR_ACTIVE_ANON),
-                global_page_state(NR_ACTIVE_FILE),
                 global_page_state(NR_INACTIVE_ANON),
+                global_page_state(NR_ISOLATED_ANON),
+                global_page_state(NR_ACTIVE_FILE),
                 global_page_state(NR_INACTIVE_FILE),
+                global_page_state(NR_ISOLATED_FILE),
                 global_page_state(NR_UNEVICTABLE),
                 global_page_state(NR_FILE_DIRTY),
                 global_page_state(NR_WRITEBACK),
@@ -2171,6 +2173,8 @@ void show_free_areas(void)
                         " active_file:%lukB"
                         " inactive_file:%lukB"
                         " unevictable:%lukB"
+                        " isolated(anon):%lukB"
+                        " isolated(file):%lukB"
                         " present:%lukB"
                         " mlocked:%lukB"
                         " dirty:%lukB"
@@ -2197,6 +2201,8 @@ void show_free_areas(void)
                         K(zone_page_state(zone, NR_ACTIVE_FILE)),
                         K(zone_page_state(zone, NR_INACTIVE_FILE)),
                         K(zone_page_state(zone, NR_UNEVICTABLE)),
+                        K(zone_page_state(zone, NR_ISOLATED_ANON)),
+                        K(zone_page_state(zone, NR_ISOLATED_FILE)),
                         K(zone->present_pages),
                         K(zone_page_state(zone, NR_MLOCK)),
                         K(zone_page_state(zone, NR_FILE_DIRTY)),
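
The per-zone show_free_areas() lines print kilobytes: each counter is wrapped in the K() macro, which converts a page count to kB. Its definition is not part of this hunk; the sketch below assumes the usual page_alloc.c form, (x) << (PAGE_SHIFT - 10), and uses the isolated_anon value from the OOM report above.

/* Quick illustration of the kB unit used in the per-zone output. */
#include <stdio.h>

#define PAGE_SHIFT 12                           /* 4 KiB pages assumed */
#define K(x) ((unsigned long)(x) << (PAGE_SHIFT - 10))

int main(void)
{
        unsigned long nr_isolated_anon = 49245; /* from the OOM report above */

        printf("isolated(anon):%lukB\n", K(nr_isolated_anon)); /* 196980kB */
        return 0;
}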
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d86a91f8c16b..75c29974e878 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1072,6 +1072,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 unsigned long nr_active;
                 unsigned int count[NR_LRU_LISTS] = { 0, };
                 int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+                unsigned long nr_anon;
+                unsigned long nr_file;
 
                 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                              &page_list, &nr_scan, sc->order, mode,
@@ -1102,6 +1104,10 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                                 -count[LRU_INACTIVE_ANON]);
 
+                nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+                nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+                __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+                __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
 
                 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
                 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1169,6 +1175,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                                 spin_lock_irq(&zone->lru_lock);
                         }
                 }
+                __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+                __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
         } while (nr_scanned < max_scan);
 
 done:
@@ -1279,6 +1288,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
         else
                 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
         spin_unlock_irq(&zone->lru_lock);
 
         while (!list_empty(&l_hold)) {
@@ -1329,7 +1339,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                                         LRU_ACTIVE + file * LRU_FILE);
         move_active_pages_to_lru(zone, &l_inactive,
                                         LRU_BASE + file * LRU_FILE);
-
+        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
         spin_unlock_irq(&zone->lru_lock);
 }
 
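
In mm/vmscan.c the counters are transient: shrink_inactive_list() raises NR_ISOLATED_ANON/NR_ISOLATED_FILE by the number of pages it pulls off the LRU and lowers them by the same amount once those pages have been reclaimed or put back, and shrink_active_list() does the same for its batch via the NR_ISOLATED_ANON + file offset. The counters are therefore a snapshot of pages currently in flight through reclaim, not a cumulative event count, which is what makes the isolated_anon:49245 line in the OOM report above meaningful. A simplified userspace model of one reclaim pass, not the kernel's code:

/* Model of the transient counter across one shrink_inactive_list() pass. */
#include <stdio.h>

static long nr_isolated_anon;   /* stands in for the per-zone counter */

static void shrink_inactive_list_model(long nr_taken)
{
        nr_isolated_anon += nr_taken;           /* __mod_zone_page_state(+nr) */
        printf("during reclaim pass: isolated_anon=%ld\n", nr_isolated_anon);

        /* ... pages would be reclaimed or collected for putback here ... */

        nr_isolated_anon -= nr_taken;           /* __mod_zone_page_state(-nr) */
        printf("after putback:       isolated_anon=%ld\n", nr_isolated_anon);
}

int main(void)
{
        shrink_inactive_list_model(32);         /* one 32-page batch */
        return 0;
}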
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7214a4511257..c81321f9feec 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -644,6 +644,8 @@ static const char * const vmstat_text[] = {
         "nr_bounce",
         "nr_vmscan_write",
         "nr_writeback_temp",
+        "nr_isolated_anon",
+        "nr_isolated_file",
         "nr_shmem",
 #ifdef CONFIG_NUMA
         "numa_hit",