diff options
author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-09-21 20:01:33 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-22 10:17:27 -0400 |
commit | 4b02108ac1b3354a22b0d83c684797692efdc395 (patch) | |
tree | 9f65d6e8e35ddce940e7b9da6305cf5a19e5904e /mm | |
parent | c6a7f5728a1db45d30df55a01adc130b4ab0327c (diff) |
mm: oom analysis: add shmem vmstat
Recently we encountered OOM problems due to memory use of the GEM cache.
Generally, a large amount of Shmem/Tmpfs pages tends to create a memory
shortage problem.
We often use the following calculation to determine the amount of shmem
pages:
shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES
However, the expression does not consider isolated and mlocked pages.
This patch adds explicit accounting for pages used by shmem and tmpfs.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c | 4 | ||||
-rw-r--r-- | mm/migrate.c | 5 | ||||
-rw-r--r-- | mm/page_alloc.c | 5 | ||||
-rw-r--r-- | mm/vmstat.c | 2 |
4 files changed, 13 insertions, 3 deletions
diff --git a/mm/filemap.c b/mm/filemap.c index dd51c68e2b86..bcc7372aebbc 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -119,6 +119,8 @@ void __remove_from_page_cache(struct page *page) | |||
119 | page->mapping = NULL; | 119 | page->mapping = NULL; |
120 | mapping->nrpages--; | 120 | mapping->nrpages--; |
121 | __dec_zone_page_state(page, NR_FILE_PAGES); | 121 | __dec_zone_page_state(page, NR_FILE_PAGES); |
122 | if (PageSwapBacked(page)) | ||
123 | __dec_zone_page_state(page, NR_SHMEM); | ||
122 | BUG_ON(page_mapped(page)); | 124 | BUG_ON(page_mapped(page)); |
123 | 125 | ||
124 | /* | 126 | /* |
@@ -431,6 +433,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
431 | if (likely(!error)) { | 433 | if (likely(!error)) { |
432 | mapping->nrpages++; | 434 | mapping->nrpages++; |
433 | __inc_zone_page_state(page, NR_FILE_PAGES); | 435 | __inc_zone_page_state(page, NR_FILE_PAGES); |
436 | if (PageSwapBacked(page)) | ||
437 | __inc_zone_page_state(page, NR_SHMEM); | ||
434 | spin_unlock_irq(&mapping->tree_lock); | 438 | spin_unlock_irq(&mapping->tree_lock); |
435 | } else { | 439 | } else { |
436 | page->mapping = NULL; | 440 | page->mapping = NULL; |
diff --git a/mm/migrate.c b/mm/migrate.c index 0edeac91348d..37143b924484 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -312,7 +312,10 @@ static int migrate_page_move_mapping(struct address_space *mapping, | |||
312 | */ | 312 | */ |
313 | __dec_zone_page_state(page, NR_FILE_PAGES); | 313 | __dec_zone_page_state(page, NR_FILE_PAGES); |
314 | __inc_zone_page_state(newpage, NR_FILE_PAGES); | 314 | __inc_zone_page_state(newpage, NR_FILE_PAGES); |
315 | 315 | if (PageSwapBacked(page)) { | |
316 | __dec_zone_page_state(page, NR_SHMEM); | ||
317 | __inc_zone_page_state(newpage, NR_SHMEM); | ||
318 | } | ||
316 | spin_unlock_irq(&mapping->tree_lock); | 319 | spin_unlock_irq(&mapping->tree_lock); |
317 | 320 | ||
318 | return 0; | 321 | return 0; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4e050f325ebd..e50c22545b8f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2139,7 +2139,7 @@ void show_free_areas(void) | |||
2139 | " unevictable:%lu" | 2139 | " unevictable:%lu" |
2140 | " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n" | 2140 | " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n" |
2141 | " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" | 2141 | " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" |
2142 | " mapped:%lu pagetables:%lu bounce:%lu\n", | 2142 | " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", |
2143 | global_page_state(NR_ACTIVE_ANON), | 2143 | global_page_state(NR_ACTIVE_ANON), |
2144 | global_page_state(NR_ACTIVE_FILE), | 2144 | global_page_state(NR_ACTIVE_FILE), |
2145 | global_page_state(NR_INACTIVE_ANON), | 2145 | global_page_state(NR_INACTIVE_ANON), |
@@ -2153,6 +2153,7 @@ void show_free_areas(void) | |||
2153 | global_page_state(NR_SLAB_RECLAIMABLE), | 2153 | global_page_state(NR_SLAB_RECLAIMABLE), |
2154 | global_page_state(NR_SLAB_UNRECLAIMABLE), | 2154 | global_page_state(NR_SLAB_UNRECLAIMABLE), |
2155 | global_page_state(NR_FILE_MAPPED), | 2155 | global_page_state(NR_FILE_MAPPED), |
2156 | global_page_state(NR_SHMEM), | ||
2156 | global_page_state(NR_PAGETABLE), | 2157 | global_page_state(NR_PAGETABLE), |
2157 | global_page_state(NR_BOUNCE)); | 2158 | global_page_state(NR_BOUNCE)); |
2158 | 2159 | ||
@@ -2175,6 +2176,7 @@ void show_free_areas(void) | |||
2175 | " dirty:%lukB" | 2176 | " dirty:%lukB" |
2176 | " writeback:%lukB" | 2177 | " writeback:%lukB" |
2177 | " mapped:%lukB" | 2178 | " mapped:%lukB" |
2179 | " shmem:%lukB" | ||
2178 | " slab_reclaimable:%lukB" | 2180 | " slab_reclaimable:%lukB" |
2179 | " slab_unreclaimable:%lukB" | 2181 | " slab_unreclaimable:%lukB" |
2180 | " kernel_stack:%lukB" | 2182 | " kernel_stack:%lukB" |
@@ -2200,6 +2202,7 @@ void show_free_areas(void) | |||
2200 | K(zone_page_state(zone, NR_FILE_DIRTY)), | 2202 | K(zone_page_state(zone, NR_FILE_DIRTY)), |
2201 | K(zone_page_state(zone, NR_WRITEBACK)), | 2203 | K(zone_page_state(zone, NR_WRITEBACK)), |
2202 | K(zone_page_state(zone, NR_FILE_MAPPED)), | 2204 | K(zone_page_state(zone, NR_FILE_MAPPED)), |
2205 | K(zone_page_state(zone, NR_SHMEM)), | ||
2203 | K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), | 2206 | K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), |
2204 | K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), | 2207 | K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), |
2205 | zone_page_state(zone, NR_KERNEL_STACK) * | 2208 | zone_page_state(zone, NR_KERNEL_STACK) * |
diff --git a/mm/vmstat.c b/mm/vmstat.c index ceda39b63d7e..7214a4511257 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -644,7 +644,7 @@ static const char * const vmstat_text[] = { | |||
644 | "nr_bounce", | 644 | "nr_bounce", |
645 | "nr_vmscan_write", | 645 | "nr_vmscan_write", |
646 | "nr_writeback_temp", | 646 | "nr_writeback_temp", |
647 | 647 | "nr_shmem", | |
648 | #ifdef CONFIG_NUMA | 648 | #ifdef CONFIG_NUMA |
649 | "numa_hit", | 649 | "numa_hit", |
650 | "numa_miss", | 650 | "numa_miss", |