Diffstat (limited to 'mm/vmstat.c')
 mm/vmstat.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91149746bb8d..66f6130976cb 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 
 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-	for_each_cpu_mask_nr(cpu, *cpumask) {
+	for_each_cpu(cpu, cpumask) {
 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
 	int cpu;
 	int threshold;
 
-	for_each_zone(zone) {
-
-		if (!zone->present_pages)
-			continue;
-
+	for_each_populated_zone(zone) {
 		threshold = calculate_threshold(zone);
 
 		for_each_online_cpu(cpu)
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
 	int i;
 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *p;
 
-		if (!populated_zone(zone))
-			continue;
-
 		p = zone_pcp(zone, cpu);
 
 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
@@ -898,7 +891,7 @@ static void vmstat_update(struct work_struct *w)
 {
 	refresh_cpu_vm_stats(smp_processor_id());
 	schedule_delayed_work(&__get_cpu_var(vmstat_work),
-		sysctl_stat_interval);
+		round_jiffies_relative(sysctl_stat_interval));
 }
 
 static void __cpuinit start_cpu_timer(int cpu)
@@ -906,7 +899,8 @@ static void __cpuinit start_cpu_timer(int cpu)
 	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
 
 	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+	schedule_delayed_work_on(cpu, vmstat_work,
+		__round_jiffies_relative(HZ, cpu));
 }
 
 /*
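The zone-loop hunks above rely on for_each_populated_zone() folding the
populated_zone() / zone->present_pages test into the iterator itself, which is
what lets both open-coded "continue" paths disappear. As a rough sketch of the
assumed equivalence (not the verbatim macro from include/linux/mmzone.h), it
behaves like:

	/*
	 * Sketch only: walk every zone, but hand the loop body only those
	 * zones that actually contain pages.
	 */
	#define for_each_populated_zone(zone)			\
		for_each_zone(zone)				\
			if (!populated_zone(zone))		\
				; /* skip empty zones */	\
			else

The timer hunks follow the same spirit of reusing common helpers:
round_jiffies_relative() rounds a relative timeout so the expiry lands on a
whole-second boundary, and __round_jiffies_relative(HZ, cpu) does the same with
a small per-CPU skew, so the periodic vmstat work across CPUs batches into
fewer wakeups instead of each CPU firing at an arbitrary jiffy.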
