path: root/mm/vmstat.c
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--  mm/vmstat.c | 61
1 file changed, 30 insertions(+), 31 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e8d846f57774..422d960ffcd8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -21,21 +21,14 @@ EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
 static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 {
-        int cpu = 0;
+        int cpu;
         int i;
 
         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-        cpu = first_cpu(*cpumask);
-        while (cpu < NR_CPUS) {
+        for_each_cpu_mask(cpu, *cpumask) {
                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
-                cpu = next_cpu(cpu, *cpumask);
-
-                if (cpu < NR_CPUS)
-                        prefetch(&per_cpu(vm_event_states, cpu));
-
-
                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                         ret[i] += this->event[i];
         }
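The hunk above drops the open-coded cpumask walk (first_cpu()/next_cpu() plus a manual prefetch of the next CPU's per-cpu data) in favour of the for_each_cpu_mask() iterator, leaving only the per-CPU summation in the loop body. The following stand-alone program is not kernel code; it is a sketch of the same pattern, with made-up NR_CPUS/NR_EVENTS values and a simplified bitmask iterator standing in for for_each_cpu_mask(), that sums per-CPU event counters into one result array.

/*
 * Illustration only: sum per-CPU event counters for the CPUs named in a
 * mask.  The sizes, data and the for_each_set_cpu() helper are invented
 * for this example; they are not the kernel's definitions.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS   4
#define NR_EVENTS 3

static unsigned long events[NR_CPUS][NR_EVENTS] = {
        { 1, 2, 3 }, { 4, 5, 6 }, { 0, 0, 0 }, { 7, 8, 9 },
};

/* foreach-style helper playing the role of for_each_cpu_mask() */
#define for_each_set_cpu(cpu, mask)                             \
        for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++)               \
                if ((mask) & (1UL << (cpu)))

static void sum_events(unsigned long *ret, unsigned long mask)
{
        int cpu, i;

        memset(ret, 0, NR_EVENTS * sizeof(unsigned long));

        for_each_set_cpu(cpu, mask) {
                for (i = 0; i < NR_EVENTS; i++)
                        ret[i] += events[cpu][i];
        }
}

int main(void)
{
        unsigned long ret[NR_EVENTS];

        sum_events(ret, 0xbUL);         /* CPUs 0, 1 and 3 */
        printf("%lu %lu %lu\n", ret[0], ret[1], ret[2]);
        return 0;
}

For the sample data this prints "12 15 18".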
@@ -284,6 +277,10 @@ EXPORT_SYMBOL(dec_zone_page_state);
 /*
  * Update the zone counters for one cpu.
  *
+ * The cpu specified must be either the current cpu or a processor that
+ * is not online. If it is the current cpu then the execution thread must
+ * be pinned to the current cpu.
+ *
  * Note that refresh_cpu_vm_stats strives to only access
  * node local memory. The per cpu pagesets on remote zones are placed
  * in the memory local to the processor using that pageset. So the
@@ -299,7 +296,7 @@ void refresh_cpu_vm_stats(int cpu)
 {
         struct zone *zone;
         int i;
-        unsigned long flags;
+        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
         for_each_zone(zone) {
                 struct per_cpu_pageset *p;
@@ -311,15 +308,19 @@ void refresh_cpu_vm_stats(int cpu)
 
                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                         if (p->vm_stat_diff[i]) {
+                                unsigned long flags;
+                                int v;
+
                                 local_irq_save(flags);
-                                zone_page_state_add(p->vm_stat_diff[i],
-                                                        zone, i);
+                                v = p->vm_stat_diff[i];
                                 p->vm_stat_diff[i] = 0;
+                                local_irq_restore(flags);
+                                atomic_long_add(v, &zone->vm_stat[i]);
+                                global_diff[i] += v;
 #ifdef CONFIG_NUMA
                                 /* 3 seconds idle till flush */
                                 p->expire = 3;
 #endif
-                                local_irq_restore(flags);
                         }
 #ifdef CONFIG_NUMA
                 /*
@@ -329,7 +330,7 @@ void refresh_cpu_vm_stats(int cpu)
                  * Check if there are pages remaining in this pageset
                  * if not then there is nothing to expire.
                  */
-                if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+                if (!p->expire || !p->pcp.count)
                         continue;
 
                 /*
@@ -344,13 +345,14 @@ void refresh_cpu_vm_stats(int cpu)
                 if (p->expire)
                         continue;
 
-                if (p->pcp[0].count)
-                        drain_zone_pages(zone, p->pcp + 0);
-
-                if (p->pcp[1].count)
-                        drain_zone_pages(zone, p->pcp + 1);
+                if (p->pcp.count)
+                        drain_zone_pages(zone, &p->pcp);
 #endif
         }
+
+        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+                if (global_diff[i])
+                        atomic_long_add(global_diff[i], &vm_stat[i]);
 }
 
 #endif
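Taken together, the refresh_cpu_vm_stats() hunks above change how per-cpu deltas are folded back: each delta is read and cleared under local_irq_save()/local_irq_restore(), then added atomically to the zone counter, while the same value is accumulated in the on-stack global_diff[] array; the shared global vm_stat[] array is updated with one atomic_long_add() per counter at the end rather than once per zone, which shortens the interrupts-off section and cuts the number of atomics on the global counters. The stand-alone program below is not kernel code; it sketches that deferred-global-update pattern with invented zone/counter sizes and C11 atomics, and it leaves out the interrupt disabling the kernel needs around the read-and-clear.

/*
 * Illustration only: fold per-zone deltas into per-zone counters as they
 * are found, but batch the updates to the shared global counters in a
 * local array and apply them once at the end.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_ZONES 2
#define NR_ITEMS 3

static atomic_long zone_stat[NR_ZONES][NR_ITEMS];
static atomic_long global_stat[NR_ITEMS];
static long stat_diff[NR_ZONES][NR_ITEMS] = {
        { 5, 0, -2 },
        { 1, 3,  0 },
};

static void refresh_stats(void)
{
        long global_diff[NR_ITEMS] = { 0 };
        int zone, i;

        for (zone = 0; zone < NR_ZONES; zone++)
                for (i = 0; i < NR_ITEMS; i++)
                        if (stat_diff[zone][i]) {
                                /* the kernel reads and clears the delta under
                                 * local_irq_save(); omitted in user space */
                                long v = stat_diff[zone][i];

                                stat_diff[zone][i] = 0;
                                atomic_fetch_add(&zone_stat[zone][i], v);
                                global_diff[i] += v;    /* defer the global update */
                        }

        /* one atomic add per item on the shared global counters */
        for (i = 0; i < NR_ITEMS; i++)
                if (global_diff[i])
                        atomic_fetch_add(&global_stat[i], global_diff[i]);
}

int main(void)
{
        refresh_stats();
        printf("global: %ld %ld %ld\n",
               atomic_load(&global_stat[0]),
               atomic_load(&global_stat[1]),
               atomic_load(&global_stat[2]));
        return 0;
}

For the sample deltas this prints "global: 6 3 -2".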
@@ -681,20 +683,17 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                    "\n  pagesets");
         for_each_online_cpu(i) {
                 struct per_cpu_pageset *pageset;
-                int j;
 
                 pageset = zone_pcp(zone, i);
-                for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
-                        seq_printf(m,
-                                   "\n    cpu: %i pcp: %i"
-                                   "\n              count: %i"
-                                   "\n              high:  %i"
-                                   "\n              batch: %i",
-                                   i, j,
-                                   pageset->pcp[j].count,
-                                   pageset->pcp[j].high,
-                                   pageset->pcp[j].batch);
-                }
+                seq_printf(m,
+                           "\n    cpu: %i"
+                           "\n              count: %i"
+                           "\n              high:  %i"
+                           "\n              batch: %i",
+                           i,
+                           pageset->pcp.count,
+                           pageset->pcp.high,
+                           pageset->pcp.batch);
 #ifdef CONFIG_SMP
                 seq_printf(m, "\n  vm stats threshold: %d",
                            pageset->stat_threshold);
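This last hunk follows from the collapse of the per-cpu pcp[0]/pcp[1] (hot/cold) pair into a single pcp structure elsewhere in this series, so /proc/zoneinfo no longer prints a per-list "pcp: %i" index and the inner loop over ARRAY_SIZE(pageset->pcp) disappears. The short stand-alone program below is not kernel code; it only reproduces the new output layout implied by the format strings above, with an invented struct and sample count/high/batch values.

/*
 * Illustration only: print one count/high/batch block per cpu in the
 * layout used by the new seq_printf() above.  Struct name and numbers
 * are made up for the example.
 */
#include <stdio.h>

struct pcp_example {
        int count;
        int high;
        int batch;
};

int main(void)
{
        struct pcp_example pcp = { .count = 56, .high = 186, .batch = 31 };
        int cpu = 0;

        printf("\n    cpu: %i"
               "\n              count: %i"
               "\n              high:  %i"
               "\n              batch: %i\n",
               cpu, pcp.count, pcp.high, pcp.batch);
        return 0;
}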