Diffstat (limited to 'mm/vmstat.c')
 mm/vmstat.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec..fa12ea3051fb 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/vmstat.h>
 #include <linux/sched.h>
@@ -139,7 +140,8 @@ static void refresh_zone_stat_thresholds(void)
 		threshold = calculate_threshold(zone);
 
 		for_each_online_cpu(cpu)
-			zone_pcp(zone, cpu)->stat_threshold = threshold;
+			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+							= threshold;
 	}
 }
 
@@ -149,7 +151,8 @@ static void refresh_zone_stat_thresholds(void)
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
+
 	s8 *p = pcp->vm_stat_diff + item;
 	long x;
 
@@ -202,7 +205,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
 	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)++;
@@ -223,7 +226,7 @@ EXPORT_SYMBOL(__inc_zone_page_state);
 
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
 	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)--;
@@ -300,7 +303,7 @@ void refresh_cpu_vm_stats(int cpu)
 	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *p;
 
-		p = zone_pcp(zone, cpu);
+		p = per_cpu_ptr(zone->pageset, cpu);
 
 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 			if (p->vm_stat_diff[i]) {
@@ -683,6 +686,9 @@ static const char * const vmstat_text[] = {
 	"slabs_scanned",
 	"kswapd_steal",
 	"kswapd_inodesteal",
+	"kswapd_low_wmark_hit_quickly",
+	"kswapd_high_wmark_hit_quickly",
+	"kswapd_skip_congestion_wait",
 	"pageoutrun",
 	"allocstall",
 
@@ -738,7 +744,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 	for_each_online_cpu(i) {
 		struct per_cpu_pageset *pageset;
 
-		pageset = zone_pcp(zone, i);
+		pageset = per_cpu_ptr(zone->pageset, i);
 		seq_printf(m,
 			   "\n    cpu: %i"
 			   "\n              count: %i"
@@ -758,7 +764,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n  prev_priority:     %i"
 		   "\n  start_pfn:         %lu"
 		   "\n  inactive_ratio:    %u",
-		   zone_is_all_unreclaimable(zone),
+		   zone->all_unreclaimable,
 		   zone->prev_priority,
 		   zone->zone_start_pfn,
 		   zone->inactive_ratio);
@@ -883,11 +889,10 @@ static void vmstat_update(struct work_struct *w)
 
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-	schedule_delayed_work_on(cpu, vmstat_work,
-		__round_jiffies_relative(HZ, cpu));
+	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*
@@ -904,6 +909,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		start_cpu_timer(cpu);
+		node_set_state(cpu_to_node(cpu), N_CPU);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
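
The hunks above switch mm/vmstat.c from the old zone_pcp(zone, cpu) accessor to the dynamic per-CPU accessors per_cpu_ptr() and this_cpu_ptr() on zone->pageset. The following is a minimal sketch of that accessor pattern only; it is not part of the patch, and struct demo_counter, demo_init() and demo_hit() are made-up names used purely for illustration.

/*
 * Illustrative sketch, not part of the patch: the dynamic per-CPU
 * accessor pattern the hunks above switch to.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct demo_counter {		/* made-up example type */
	long hits;
};

static struct demo_counter __percpu *demo;

static int demo_init(void)
{
	int cpu;

	/* One instance per CPU from the per-CPU allocator. */
	demo = alloc_percpu(struct demo_counter);
	if (!demo)
		return -ENOMEM;

	/* per_cpu_ptr(): reach a given CPU's copy, e.g. from a
	 * housekeeping path that walks all online CPUs. */
	for_each_online_cpu(cpu)
		per_cpu_ptr(demo, cpu)->hits = 0;

	return 0;
}

static void demo_hit(void)
{
	/* this_cpu_ptr(): the executing CPU's copy, for hot paths that
	 * only touch local state. */
	this_cpu_ptr(demo)->hits++;
}

free_percpu() releases such an allocation once it is no longer needed.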