diff options
author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-09-21 20:01:33 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-22 10:17:27 -0400 |
commit | 4b02108ac1b3354a22b0d83c684797692efdc395 (patch) | |
tree | 9f65d6e8e35ddce940e7b9da6305cf5a19e5904e | |
parent | c6a7f5728a1db45d30df55a01adc130b4ab0327c (diff) |
mm: oom analysis: add shmem vmstat
Recently we encountered OOM problems due to memory use of the GEM cache.
Generally a large amount of Shmem/Tmpfs pages tend to create a memory
shortage problem.
We often use the following calculation to determine the amount of shmem
pages:
shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES
however the expression does not consider isolated and mlocked pages.
This patch adds explicit accounting for pages used by shmem and tmpfs.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | drivers/base/node.c | 2 | ||||
-rw-r--r-- | fs/proc/meminfo.c | 2 | ||||
-rw-r--r-- | include/linux/mmzone.h | 1 | ||||
-rw-r--r-- | mm/filemap.c | 4 | ||||
-rw-r--r-- | mm/migrate.c | 5 | ||||
-rw-r--r-- | mm/page_alloc.c | 5 | ||||
-rw-r--r-- | mm/vmstat.c | 2 |
7 files changed, 18 insertions, 3 deletions
diff --git a/drivers/base/node.c b/drivers/base/node.c index b560c17f6d4e..1fe5536d404f 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -85,6 +85,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, | |||
85 | "Node %d FilePages: %8lu kB\n" | 85 | "Node %d FilePages: %8lu kB\n" |
86 | "Node %d Mapped: %8lu kB\n" | 86 | "Node %d Mapped: %8lu kB\n" |
87 | "Node %d AnonPages: %8lu kB\n" | 87 | "Node %d AnonPages: %8lu kB\n" |
88 | "Node %d Shmem: %8lu kB\n" | ||
88 | "Node %d KernelStack: %8lu kB\n" | 89 | "Node %d KernelStack: %8lu kB\n" |
89 | "Node %d PageTables: %8lu kB\n" | 90 | "Node %d PageTables: %8lu kB\n" |
90 | "Node %d NFS_Unstable: %8lu kB\n" | 91 | "Node %d NFS_Unstable: %8lu kB\n" |
@@ -117,6 +118,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, | |||
117 | nid, K(node_page_state(nid, NR_FILE_PAGES)), | 118 | nid, K(node_page_state(nid, NR_FILE_PAGES)), |
118 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), | 119 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), |
119 | nid, K(node_page_state(nid, NR_ANON_PAGES)), | 120 | nid, K(node_page_state(nid, NR_ANON_PAGES)), |
121 | nid, K(node_page_state(nid, NR_SHMEM)), | ||
120 | nid, node_page_state(nid, NR_KERNEL_STACK) * | 122 | nid, node_page_state(nid, NR_KERNEL_STACK) * |
121 | THREAD_SIZE / 1024, | 123 | THREAD_SIZE / 1024, |
122 | nid, K(node_page_state(nid, NR_PAGETABLE)), | 124 | nid, K(node_page_state(nid, NR_PAGETABLE)), |
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 1fc588f430e4..171e052c07b3 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c | |||
@@ -81,6 +81,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
81 | "Writeback: %8lu kB\n" | 81 | "Writeback: %8lu kB\n" |
82 | "AnonPages: %8lu kB\n" | 82 | "AnonPages: %8lu kB\n" |
83 | "Mapped: %8lu kB\n" | 83 | "Mapped: %8lu kB\n" |
84 | "Shmem: %8lu kB\n" | ||
84 | "Slab: %8lu kB\n" | 85 | "Slab: %8lu kB\n" |
85 | "SReclaimable: %8lu kB\n" | 86 | "SReclaimable: %8lu kB\n" |
86 | "SUnreclaim: %8lu kB\n" | 87 | "SUnreclaim: %8lu kB\n" |
@@ -125,6 +126,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
125 | K(global_page_state(NR_WRITEBACK)), | 126 | K(global_page_state(NR_WRITEBACK)), |
126 | K(global_page_state(NR_ANON_PAGES)), | 127 | K(global_page_state(NR_ANON_PAGES)), |
127 | K(global_page_state(NR_FILE_MAPPED)), | 128 | K(global_page_state(NR_FILE_MAPPED)), |
129 | K(global_page_state(NR_SHMEM)), | ||
128 | K(global_page_state(NR_SLAB_RECLAIMABLE) + | 130 | K(global_page_state(NR_SLAB_RECLAIMABLE) + |
129 | global_page_state(NR_SLAB_UNRECLAIMABLE)), | 131 | global_page_state(NR_SLAB_UNRECLAIMABLE)), |
130 | K(global_page_state(NR_SLAB_RECLAIMABLE)), | 132 | K(global_page_state(NR_SLAB_RECLAIMABLE)), |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d9335b8de84a..b3583b93b77e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -100,6 +100,7 @@ enum zone_stat_item { | |||
100 | NR_BOUNCE, | 100 | NR_BOUNCE, |
101 | NR_VMSCAN_WRITE, | 101 | NR_VMSCAN_WRITE, |
102 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ | 102 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ |
103 | NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ | ||
103 | #ifdef CONFIG_NUMA | 104 | #ifdef CONFIG_NUMA |
104 | NUMA_HIT, /* allocated in intended node */ | 105 | NUMA_HIT, /* allocated in intended node */ |
105 | NUMA_MISS, /* allocated in non intended node */ | 106 | NUMA_MISS, /* allocated in non intended node */ |
diff --git a/mm/filemap.c b/mm/filemap.c index dd51c68e2b86..bcc7372aebbc 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -119,6 +119,8 @@ void __remove_from_page_cache(struct page *page) | |||
119 | page->mapping = NULL; | 119 | page->mapping = NULL; |
120 | mapping->nrpages--; | 120 | mapping->nrpages--; |
121 | __dec_zone_page_state(page, NR_FILE_PAGES); | 121 | __dec_zone_page_state(page, NR_FILE_PAGES); |
122 | if (PageSwapBacked(page)) | ||
123 | __dec_zone_page_state(page, NR_SHMEM); | ||
122 | BUG_ON(page_mapped(page)); | 124 | BUG_ON(page_mapped(page)); |
123 | 125 | ||
124 | /* | 126 | /* |
@@ -431,6 +433,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
431 | if (likely(!error)) { | 433 | if (likely(!error)) { |
432 | mapping->nrpages++; | 434 | mapping->nrpages++; |
433 | __inc_zone_page_state(page, NR_FILE_PAGES); | 435 | __inc_zone_page_state(page, NR_FILE_PAGES); |
436 | if (PageSwapBacked(page)) | ||
437 | __inc_zone_page_state(page, NR_SHMEM); | ||
434 | spin_unlock_irq(&mapping->tree_lock); | 438 | spin_unlock_irq(&mapping->tree_lock); |
435 | } else { | 439 | } else { |
436 | page->mapping = NULL; | 440 | page->mapping = NULL; |
diff --git a/mm/migrate.c b/mm/migrate.c index 0edeac91348d..37143b924484 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -312,7 +312,10 @@ static int migrate_page_move_mapping(struct address_space *mapping, | |||
312 | */ | 312 | */ |
313 | __dec_zone_page_state(page, NR_FILE_PAGES); | 313 | __dec_zone_page_state(page, NR_FILE_PAGES); |
314 | __inc_zone_page_state(newpage, NR_FILE_PAGES); | 314 | __inc_zone_page_state(newpage, NR_FILE_PAGES); |
315 | 315 | if (PageSwapBacked(page)) { | |
316 | __dec_zone_page_state(page, NR_SHMEM); | ||
317 | __inc_zone_page_state(newpage, NR_SHMEM); | ||
318 | } | ||
316 | spin_unlock_irq(&mapping->tree_lock); | 319 | spin_unlock_irq(&mapping->tree_lock); |
317 | 320 | ||
318 | return 0; | 321 | return 0; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4e050f325ebd..e50c22545b8f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2139,7 +2139,7 @@ void show_free_areas(void) | |||
2139 | " unevictable:%lu" | 2139 | " unevictable:%lu" |
2140 | " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n" | 2140 | " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n" |
2141 | " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" | 2141 | " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" |
2142 | " mapped:%lu pagetables:%lu bounce:%lu\n", | 2142 | " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", |
2143 | global_page_state(NR_ACTIVE_ANON), | 2143 | global_page_state(NR_ACTIVE_ANON), |
2144 | global_page_state(NR_ACTIVE_FILE), | 2144 | global_page_state(NR_ACTIVE_FILE), |
2145 | global_page_state(NR_INACTIVE_ANON), | 2145 | global_page_state(NR_INACTIVE_ANON), |
@@ -2153,6 +2153,7 @@ void show_free_areas(void) | |||
2153 | global_page_state(NR_SLAB_RECLAIMABLE), | 2153 | global_page_state(NR_SLAB_RECLAIMABLE), |
2154 | global_page_state(NR_SLAB_UNRECLAIMABLE), | 2154 | global_page_state(NR_SLAB_UNRECLAIMABLE), |
2155 | global_page_state(NR_FILE_MAPPED), | 2155 | global_page_state(NR_FILE_MAPPED), |
2156 | global_page_state(NR_SHMEM), | ||
2156 | global_page_state(NR_PAGETABLE), | 2157 | global_page_state(NR_PAGETABLE), |
2157 | global_page_state(NR_BOUNCE)); | 2158 | global_page_state(NR_BOUNCE)); |
2158 | 2159 | ||
@@ -2175,6 +2176,7 @@ void show_free_areas(void) | |||
2175 | " dirty:%lukB" | 2176 | " dirty:%lukB" |
2176 | " writeback:%lukB" | 2177 | " writeback:%lukB" |
2177 | " mapped:%lukB" | 2178 | " mapped:%lukB" |
2179 | " shmem:%lukB" | ||
2178 | " slab_reclaimable:%lukB" | 2180 | " slab_reclaimable:%lukB" |
2179 | " slab_unreclaimable:%lukB" | 2181 | " slab_unreclaimable:%lukB" |
2180 | " kernel_stack:%lukB" | 2182 | " kernel_stack:%lukB" |
@@ -2200,6 +2202,7 @@ void show_free_areas(void) | |||
2200 | K(zone_page_state(zone, NR_FILE_DIRTY)), | 2202 | K(zone_page_state(zone, NR_FILE_DIRTY)), |
2201 | K(zone_page_state(zone, NR_WRITEBACK)), | 2203 | K(zone_page_state(zone, NR_WRITEBACK)), |
2202 | K(zone_page_state(zone, NR_FILE_MAPPED)), | 2204 | K(zone_page_state(zone, NR_FILE_MAPPED)), |
2205 | K(zone_page_state(zone, NR_SHMEM)), | ||
2203 | K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), | 2206 | K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), |
2204 | K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), | 2207 | K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), |
2205 | zone_page_state(zone, NR_KERNEL_STACK) * | 2208 | zone_page_state(zone, NR_KERNEL_STACK) * |
diff --git a/mm/vmstat.c b/mm/vmstat.c index ceda39b63d7e..7214a4511257 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -644,7 +644,7 @@ static const char * const vmstat_text[] = { | |||
644 | "nr_bounce", | 644 | "nr_bounce", |
645 | "nr_vmscan_write", | 645 | "nr_vmscan_write", |
646 | "nr_writeback_temp", | 646 | "nr_writeback_temp", |
647 | 647 | "nr_shmem", | |
648 | #ifdef CONFIG_NUMA | 648 | #ifdef CONFIG_NUMA |
649 | "numa_hit", | 649 | "numa_hit", |
650 | "numa_miss", | 650 | "numa_miss", |