about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--include/linux/vmstat.h3
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/vmstat.c40
3 files changed, 37 insertions, 8 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c586679b6fef..502767f4e4d4 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -198,7 +198,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
198extern void dec_zone_state(struct zone *, enum zone_stat_item); 198extern void dec_zone_state(struct zone *, enum zone_stat_item);
199extern void __dec_zone_state(struct zone *, enum zone_stat_item); 199extern void __dec_zone_state(struct zone *, enum zone_stat_item);
200 200
201void refresh_cpu_vm_stats(int); 201void cpu_vm_stats_fold(int cpu);
202void refresh_zone_stat_thresholds(void); 202void refresh_zone_stat_thresholds(void);
203 203
204void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); 204void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
@@ -255,6 +255,7 @@ static inline void __dec_zone_page_state(struct page *page,
255 255
256static inline void refresh_cpu_vm_stats(int cpu) { } 256static inline void refresh_cpu_vm_stats(int cpu) { }
257static inline void refresh_zone_stat_thresholds(void) { } 257static inline void refresh_zone_stat_thresholds(void) { }
258static inline void cpu_vm_stats_fold(int cpu) { }
258 259
259static inline void drain_zonestat(struct zone *zone, 260static inline void drain_zonestat(struct zone *zone,
260 struct per_cpu_pageset *pset) { } 261 struct per_cpu_pageset *pset) { }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 42c59300bacd..f885eb827159 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5435,7 +5435,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
5435 * This is only okay since the processor is dead and cannot 5435 * This is only okay since the processor is dead and cannot
5436 * race with what we are doing. 5436 * race with what we are doing.
5437 */ 5437 */
5438 refresh_cpu_vm_stats(cpu); 5438 cpu_vm_stats_fold(cpu);
5439 } 5439 }
5440 return NOTIFY_OK; 5440 return NOTIFY_OK;
5441} 5441}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8a8da1f9b044..aaee66330e01 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -415,11 +415,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
415#endif 415#endif
416 416
417/* 417/*
418 * Update the zone counters for one cpu. 418 * Update the zone counters for the current cpu.
419 *
420 * The cpu specified must be either the current cpu or a processor that
421 * is not online. If it is the current cpu then the execution thread must
422 * be pinned to the current cpu.
423 * 419 *
424 * Note that refresh_cpu_vm_stats strives to only access 420 * Note that refresh_cpu_vm_stats strives to only access
425 * node local memory. The per cpu pagesets on remote zones are placed 421 * node local memory. The per cpu pagesets on remote zones are placed
@@ -432,7 +428,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
432 * with the global counters. These could cause remote node cache line 428 * with the global counters. These could cause remote node cache line
433 * bouncing and will have to be only done when necessary. 429 * bouncing and will have to be only done when necessary.
434 */ 430 */
435void refresh_cpu_vm_stats(int cpu) 431static void refresh_cpu_vm_stats(int cpu)
436{ 432{
437 struct zone *zone; 433 struct zone *zone;
438 int i; 434 int i;
@@ -494,6 +490,38 @@ void refresh_cpu_vm_stats(int cpu)
494} 490}
495 491
496/* 492/*
493 * Fold the data for an offline cpu into the global array.
494 * There cannot be any access by the offline cpu and therefore
495 * synchronization is simplified.
496 */
497void cpu_vm_stats_fold(int cpu)
498{
499 struct zone *zone;
500 int i;
501 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
502
503 for_each_populated_zone(zone) {
504 struct per_cpu_pageset *p;
505
506 p = per_cpu_ptr(zone->pageset, cpu);
507
508 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
509 if (p->vm_stat_diff[i]) {
510 int v;
511
512 v = p->vm_stat_diff[i];
513 p->vm_stat_diff[i] = 0;
514 atomic_long_add(v, &zone->vm_stat[i]);
515 global_diff[i] += v;
516 }
517 }
518
519 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
520 if (global_diff[i])
521 atomic_long_add(global_diff[i], &vm_stat[i]);
522}
523
524/*
497 * this is only called if !populated_zone(zone), which implies no other users of 525 * this is only called if !populated_zone(zone), which implies no other users of
498 * pset->vm_stat_diff[] exist. 526 * pset->vm_stat_diff[] exist.
499 */ 527 */