author     Christoph Lameter <cl@linux.com>                  2016-01-14 18:21:40 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-01-14 19:00:49 -0500
commit     0eb77e9880321915322d42913c3b53241739c8aa (patch)
tree       5beff1411a4178b6157b78fcd6ef7302050f2641 /mm/vmstat.c
parent     686739f6af5e8d5687ffebbf1193ff066aada6d9 (diff)
vmstat: make vmstat_updater deferrable again and shut down on idle
Currently the vmstat updater is not deferrable as a result of commit
ba4877b9ca51 ("vmstat: do not use deferrable delayed work for
vmstat_update"). This in turn can cause multiple interruptions of the
applications because the vmstat updater may run at any time.

Make vmstat_update deferrable again and provide a function that folds the
differentials when the processor is going to idle mode, thus addressing the
issue of the above commit in a clean way.

Note that the shepherd thread will continue scanning the differentials from
another processor and will reenable the vmstat workers if it detects any
changes.

Fixes: ba4877b9ca51 ("vmstat: do not use deferrable delayed work for vmstat_update")
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
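Editorial aside: the behaviour the patch adds on idle entry, folding the per-cpu differentials into the global counters until a pass finds nothing left to fold, can be modelled with a short standalone C program. This is only a sketch of the idea; cpu_diff, global_count and fold_diffs() are invented stand-ins for the kernel's per-cpu vm_stat_diff[] and global vm_stat[] state, not kernel APIs.

#include <stdio.h>

#define NR_ITEMS 4

static long cpu_diff[NR_ITEMS];     /* stand-in for one CPU's vm_stat_diff[] */
static long global_count[NR_ITEMS]; /* stand-in for the global vm_stat[] counters */

/* Fold the local differentials into the global counters; return 1 if anything changed. */
static int fold_diffs(void)
{
	int changed = 0;
	int i;

	for (i = 0; i < NR_ITEMS; i++) {
		if (cpu_diff[i]) {
			global_count[i] += cpu_diff[i];
			cpu_diff[i] = 0;
			changed = 1;
		}
	}
	return changed;
}

int main(void)
{
	cpu_diff[0] = 3;
	cpu_diff[2] = -1;

	/*
	 * Mimic the idle-entry behaviour: keep folding until a pass finds
	 * nothing left. In the kernel, each pass also keeps the periodic
	 * vmstat worker cancelled so the CPU can stay idle undisturbed.
	 */
	do {
		/* nothing else to do in this model */
	} while (fold_diffs());

	printf("global_count[0]=%ld global_count[2]=%ld\n",
	       global_count[0], global_count[2]);
	return 0;
}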
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--  mm/vmstat.c | 69
1 file changed, 44 insertions(+), 25 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c54fd2924f25..83a003bc3cae 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -460,7 +460,7 @@ static int fold_diff(int *diff)
  *
  * The function returns the number of global counters updated.
  */
-static int refresh_cpu_vm_stats(void)
+static int refresh_cpu_vm_stats(bool do_pagesets)
 {
 	struct zone *zone;
 	int i;
@@ -484,33 +484,35 @@ static int refresh_cpu_vm_stats(void)
 #endif
 			}
 		}
-		cond_resched();
 #ifdef CONFIG_NUMA
-		/*
-		 * Deal with draining the remote pageset of this
-		 * processor
-		 *
-		 * Check if there are pages remaining in this pageset
-		 * if not then there is nothing to expire.
-		 */
-		if (!__this_cpu_read(p->expire) ||
+		if (do_pagesets) {
+			cond_resched();
+			/*
+			 * Deal with draining the remote pageset of this
+			 * processor
+			 *
+			 * Check if there are pages remaining in this pageset
+			 * if not then there is nothing to expire.
+			 */
+			if (!__this_cpu_read(p->expire) ||
 			       !__this_cpu_read(p->pcp.count))
-			continue;
+				continue;
 
-		/*
-		 * We never drain zones local to this processor.
-		 */
-		if (zone_to_nid(zone) == numa_node_id()) {
-			__this_cpu_write(p->expire, 0);
-			continue;
-		}
+			/*
+			 * We never drain zones local to this processor.
+			 */
+			if (zone_to_nid(zone) == numa_node_id()) {
+				__this_cpu_write(p->expire, 0);
+				continue;
+			}
 
-		if (__this_cpu_dec_return(p->expire))
-			continue;
+			if (__this_cpu_dec_return(p->expire))
+				continue;
 
-		if (__this_cpu_read(p->pcp.count)) {
-			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
-			changes++;
+			if (__this_cpu_read(p->pcp.count)) {
+				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
+				changes++;
+			}
 		}
 #endif
 	}
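For orientation (editorial note, not part of the commit): the new do_pagesets argument only gates the remote-pageset draining and the cond_resched() call above; the differential folding earlier in the function always runs. The two call sites introduced later in this patch use the flag as summarised below.

	refresh_cpu_vm_stats(true);	/* vmstat_update(): fold diffs and drain remote pagesets */
	refresh_cpu_vm_stats(false);	/* quiet_vmstat(): fold diffs only, skip pageset draining */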
@@ -1386,7 +1388,7 @@ static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-	if (refresh_cpu_vm_stats()) {
+	if (refresh_cpu_vm_stats(true)) {
 		/*
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
@@ -1418,6 +1420,23 @@ static void vmstat_update(struct work_struct *w)
 }
 
 /*
+ * Switch off vmstat processing and then fold all the remaining differentials
+ * until the diffs stay at zero. The function is used by NOHZ and can only be
+ * invoked when tick processing is not active.
+ */
+void quiet_vmstat(void)
+{
+	if (system_state != SYSTEM_RUNNING)
+		return;
+
+	do {
+		if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+			cancel_delayed_work(this_cpu_ptr(&vmstat_work));
+
+	} while (refresh_cpu_vm_stats(false));
+}
+
+/*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
  */
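The comment on quiet_vmstat() states that it is used by NOHZ and may only be invoked while tick processing is not active; the corresponding call site lies outside mm/vmstat.c and is therefore not part of this diffstat. The fragment below is a hypothetical sketch of such an idle-entry hook; enter_nohz_idle() and its surrounding logic are invented for illustration and are not the real tick code.

/* Hypothetical idle-entry hook; the name and structure are illustrative only. */
static void enter_nohz_idle(void)
{
	/* ...the periodic tick has been, or is about to be, stopped on this CPU... */
	quiet_vmstat();		/* cancel vmstat_work and fold any pending differentials */
	/* ...enter the idle loop... */
}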
@@ -1449,7 +1468,7 @@ static bool need_update(int cpu)
  */
 static void vmstat_shepherd(struct work_struct *w);
 
-static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
+static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
 
 static void vmstat_shepherd(struct work_struct *w)
 {
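The final hunk turns the shepherd's work item from an ordinary delayed work into a deferrable one, so its timer no longer forces an idle CPU to wake up just to scan the differentials; it fires on the next occasion the CPU is active anyway. As a rough editorial contrast (the example_* names are invented; both macros are the existing ones from <linux/workqueue.h>):

static void example_fn(struct work_struct *w);

static DECLARE_DELAYED_WORK(example_plain, example_fn);		/* timer may wake an idle CPU */
static DECLARE_DEFERRABLE_WORK(example_deferred, example_fn);	/* timer is deferred while the CPU idles */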