author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-09-21 20:01:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:27 -0400
commit	4b02108ac1b3354a22b0d83c684797692efdc395 (patch)
tree	9f65d6e8e35ddce940e7b9da6305cf5a19e5904e /mm/page_alloc.c
parent	c6a7f5728a1db45d30df55a01adc130b4ab0327c (diff)
mm: oom analysis: add shmem vmstat
Recently we encountered OOM problems due to memory use of the GEM cache.
Generally, a large amount of Shmem/Tmpfs pages tends to create a memory
shortage problem.
We often use the following calculation to estimate the amount of shmem
pages (shmem/tmpfs pages are swap-backed and therefore sit on the anon
LRU lists, which is why the subtraction approximates them):

shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES

However, this expression does not account for isolated and mlocked pages.
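For illustration only (not part of the patch): a minimal userspace sketch
of the estimation above. It assumes the /proc/vmstat field names
nr_active_anon, nr_inactive_anon and nr_anon_pages; all values are page
counts.

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long val;
	unsigned long active = 0, inactive = 0, anon = 0;
	long long est;

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	/* Each /proc/vmstat line is "name value"; pick out the three
	 * counters used by the approximation quoted above. */
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, "nr_active_anon"))
			active = val;
		else if (!strcmp(name, "nr_inactive_anon"))
			inactive = val;
		else if (!strcmp(name, "nr_anon_pages"))
			anon = val;
	}
	fclose(f);

	/*
	 * shmem ~= active_anon + inactive_anon - anon_pages.
	 * Isolated and mlocked pages are not accounted for, so the
	 * estimate can even go negative; use a signed type.
	 */
	est = (long long)active + (long long)inactive - (long long)anon;
	printf("estimated shmem: %lld pages\n", est);
	return 0;
}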
This patch adds explicit accounting for pages used by shmem and tmpfs.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e050f325ebd..e50c22545b8f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2139,7 +2139,7 @@ void show_free_areas(void)
 		" unevictable:%lu"
 		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
-		" mapped:%lu pagetables:%lu bounce:%lu\n",
+		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_ANON),
@@ -2153,6 +2153,7 @@ void show_free_areas(void)
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
+		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE));
 
@@ -2175,6 +2176,7 @@ void show_free_areas(void)
 		" dirty:%lukB"
 		" writeback:%lukB"
 		" mapped:%lukB"
+		" shmem:%lukB"
 		" slab_reclaimable:%lukB"
 		" slab_unreclaimable:%lukB"
 		" kernel_stack:%lukB"
@@ -2200,6 +2202,7 @@ void show_free_areas(void)
 		K(zone_page_state(zone, NR_FILE_DIRTY)),
 		K(zone_page_state(zone, NR_WRITEBACK)),
 		K(zone_page_state(zone, NR_FILE_MAPPED)),
+		K(zone_page_state(zone, NR_SHMEM)),
 		K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 		K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 		zone_page_state(zone, NR_KERNEL_STACK) *
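This file-limited view shows only the reporting side; the NR_SHMEM counter
itself is maintained elsewhere in the patch. As a hedged sketch (a fragment,
not quoted from this diff): since shmem/tmpfs pages are swap-backed, the
page-cache add and remove paths can keep the counter in sync along these
lines:

	/* Illustrative only: on adding a page to the page cache */
	if (PageSwapBacked(page))	/* shmem/tmpfs page? */
		__inc_zone_page_state(page, NR_SHMEM);

	/* ... and the mirror-image decrement on removal */
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);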