Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    | 4
-rw-r--r--  mm/mmap.c       | 2
-rw-r--r--  mm/nommu.c      | 2
-rw-r--r--  mm/page_alloc.c | 5
-rw-r--r--  mm/swap_state.c | 4
-rw-r--r--  mm/vmstat.c     | 7
6 files changed, 7 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 648f2c0c8e18..87d62c44c3f0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -120,7 +120,7 @@ void __remove_from_page_cache(struct page *page)
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 }
 
 void remove_from_page_cache(struct page *page)
@@ -449,7 +449,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 		page->mapping = mapping;
 		page->index = offset;
 		mapping->nrpages++;
-		pagecache_acct(1);
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	}
 	write_unlock_irq(&mapping->tree_lock);
 	radix_tree_preload_end();
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -96,7 +96,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		unsigned long n;
 
-		free = get_page_cache_size();
+		free = global_page_state(NR_FILE_PAGES);
 		free += nr_swap_pages;
 
 		/*
diff --git a/mm/nommu.c b/mm/nommu.c
index 029fadac0fb5..5151c44a8257 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1122,7 +1122,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		unsigned long n;
 
-		free = get_page_cache_size();
+		free = global_page_state(NR_FILE_PAGES);
 		free += nr_swap_pages;
 
 		/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 04dd2b01b2b7..8350720f98a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2124,16 +2124,11 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
-	long *count;
 	unsigned long *src, *dest;
 
 	if (action == CPU_DEAD) {
 		int i;
 
-		/* Drain local pagecache count. */
-		count = &per_cpu(nr_pagecache_local, cpu);
-		atomic_add(*count, &nr_pagecache);
-		*count = 0;
 		local_irq_disable();
 		__drain_pages(cpu);
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7535211bb495..fccbd9bba77b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -87,7 +87,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 		SetPageSwapCache(page);
 		set_page_private(page, entry.val);
 		total_swapcache_pages++;
-		pagecache_acct(1);
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	}
 	write_unlock_irq(&swapper_space.tree_lock);
 	radix_tree_preload_end();
@@ -132,7 +132,7 @@ void __delete_from_swap_cache(struct page *page)
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4800091c129a..f16b33eb6d5c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,12 +20,6 @@
  */
 DEFINE_PER_CPU(struct page_state, page_states) = {0};
 
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
 static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	unsigned cpu;
@@ -402,6 +396,7 @@ struct seq_operations fragmentation_op = {
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_mapped",
+	"nr_file_pages",
 
 	/* Page state */
 	"nr_dirty",
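For context (not part of the patch): a minimal sketch of the accounting pattern the hunks above converge on. The helper names account_file_page() and file_pages_now() are hypothetical and used only for illustration; the calls themselves (__inc_zone_page_state(), __dec_zone_page_state(), global_page_state()) are exactly the ones introduced above, and the non-atomic __inc/__dec variants are safe only because the callers hold the relevant tree_lock with interrupts disabled, as in the filemap.c and swap_state.c hunks.

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/mm.h>	/* struct page and the zone page state helpers (assumed header) */

/* Count a page into or out of the per-zone NR_FILE_PAGES counter. */
static inline void account_file_page(struct page *page, int add)
{
	if (add)
		__inc_zone_page_state(page, NR_FILE_PAGES);	/* page enters the page/swap cache */
	else
		__dec_zone_page_state(page, NR_FILE_PAGES);	/* page leaves the page/swap cache */
}

/* Read the system-wide total, as __vm_enough_memory() now does
 * instead of calling the removed get_page_cache_size(). */
static inline unsigned long file_pages_now(void)
{
	return global_page_state(NR_FILE_PAGES);
}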