author     Christoph Lameter <cl@linux.com>          2013-09-11 17:21:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 18:57:31 -0400
commit     fbc2edb05354480a88aa39db8a6acb5782fa1a1b (patch)
tree       b3d10c8b1310e264a9d9d4b9a8ea3416b4d12c0d
parent     4edb0748b23887140578d68f5f4e6e2de337a481 (diff)
vmstat: use this_cpu() to avoid irqon/off sequence in refresh_cpu_vm_stats
Disabling interrupts repeatedly can be avoided in the inner loop if we use
a this_cpu operation.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
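[Editor's note: for readers unfamiliar with the this_cpu API, the old loop had to
bracket each read-and-clear of a per-cpu counter with local_irq_save()/
local_irq_restore(), while this_cpu_xchg() performs the same read-and-clear as a
single interrupt-safe per-cpu operation. A minimal sketch of the two patterns,
using a hypothetical per-cpu counter demo_diff rather than the real vm_stat_diff
array:

	#include <linux/percpu.h>
	#include <linux/irqflags.h>

	/* Hypothetical per-cpu counter, for illustration only. */
	static DEFINE_PER_CPU(int, demo_diff);

	/* Old pattern: open-coded read-and-clear, guarded by irq disable/enable. */
	static int fold_demo_irqoff(void)
	{
		unsigned long flags;
		int v;

		local_irq_save(flags);
		v = __this_cpu_read(demo_diff);
		__this_cpu_write(demo_diff, 0);
		local_irq_restore(flags);
		return v;
	}

	/* New pattern: this_cpu_xchg() reads and clears the counter in one
	 * operation that is safe against interrupts on the local CPU. */
	static int fold_demo_xchg(void)
	{
		return this_cpu_xchg(demo_diff, 0);
	}

Architectures without cheap per-cpu instructions fall back to a generic
this_cpu_xchg() that disables interrupts internally, so the conversion helps
where native per-cpu ops exist (e.g. x86) and is no worse elsewhere.]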
-rw-r--r--  mm/vmstat.c  35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 158ca6494bc6..d57a09143bf9 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -437,33 +437,29 @@ static inline void fold_diff(int *diff)
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
  */
-static void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
 {
 	struct zone *zone;
 	int i;
 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
 	for_each_populated_zone(zone) {
-		struct per_cpu_pageset *p;
+		struct per_cpu_pageset __percpu *p = zone->pageset;
 
-		p = per_cpu_ptr(zone->pageset, cpu);
+		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+			int v;
 
-		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-			if (p->vm_stat_diff[i]) {
-				unsigned long flags;
-				int v;
+			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+			if (v) {
 
-				local_irq_save(flags);
-				v = p->vm_stat_diff[i];
-				p->vm_stat_diff[i] = 0;
-				local_irq_restore(flags);
 				atomic_long_add(v, &zone->vm_stat[i]);
 				global_diff[i] += v;
 #ifdef CONFIG_NUMA
 				/* 3 seconds idle till flush */
-				p->expire = 3;
+				__this_cpu_write(p->expire, 3);
 #endif
 			}
+		}
 		cond_resched();
 #ifdef CONFIG_NUMA
 		/*
@@ -473,23 +469,24 @@ static void refresh_cpu_vm_stats(int cpu)
 		 * Check if there are pages remaining in this pageset
 		 * if not then there is nothing to expire.
 		 */
-		if (!p->expire || !p->pcp.count)
+		if (!__this_cpu_read(p->expire) ||
+		    !__this_cpu_read(p->pcp.count))
 			continue;
 
 		/*
 		 * We never drain zones local to this processor.
 		 */
 		if (zone_to_nid(zone) == numa_node_id()) {
-			p->expire = 0;
+			__this_cpu_write(p->expire, 0);
 			continue;
 		}
 
-		p->expire--;
-		if (p->expire)
+
+		if (__this_cpu_dec_return(p->expire))
 			continue;
 
-		if (p->pcp.count)
-			drain_zone_pages(zone, &p->pcp);
+		if (__this_cpu_read(p->pcp.count))
+			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
 #endif
 	}
 	fold_diff(global_diff);
@@ -1216,7 +1213,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 
 static void vmstat_update(struct work_struct *w)
 {
-	refresh_cpu_vm_stats(smp_processor_id());
+	refresh_cpu_vm_stats();
 	schedule_delayed_work(&__get_cpu_var(vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }