about | summary | refs | log | tree | commit | diff | stats
path: root/include/linux/vmstat.h
diff options
context:
space:
mode:
author:    Christoph Lameter <cl@linux.com>  2013-09-11 17:21:30 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 18:57:31 -0400
commit: 2bb921e526656556e68f99f5f15a4a1bf2691844 (patch)
tree:   91b009a59938d7713de0781df9d5c0c2eacfc51f /include/linux/vmstat.h
parent: d2cf5ad6312ca9913464fac40fb47ba47ad945c4 (diff)
vmstat: create separate function to fold per cpu diffs into local counters
The main idea behind this patchset is to reduce the vmstat update overhead by
avoiding interrupt enable/disable and the use of per-cpu atomics.

This patch (of 3):

It is better to have a separate folding function because
refresh_cpu_vm_stats() also does other things like expire pages in the page
allocator caches.

If we have a separate function then refresh_cpu_vm_stats() is only called
from the local cpu, which allows additional optimizations.

The folding function is only called when a cpu is being downed, and
therefore no other processor will be accessing the counters.  This also
simplifies synchronization.

[akpm@linux-foundation.org: fix UP build]

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/vmstat.h')
-rw-r--r--  include/linux/vmstat.h  |  3 +-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c586679b6fef..502767f4e4d4 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -198,7 +198,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
 extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
-void refresh_cpu_vm_stats(int);
+void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
@@ -255,6 +255,7 @@ static inline void __dec_zone_page_state(struct page *page,
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
+static inline void cpu_vm_stats_fold(int cpu) { }
 
 static inline void drain_zonestat(struct zone *zone,
 			struct per_cpu_pageset *pset) { }