diff options
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r-- | mm/vmstat.c | 70 |
1 file changed, 47 insertions, 23 deletions
diff --git a/mm/vmstat.c b/mm/vmstat.c index 40b2c74ddf16..084c6725b373 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -1396,10 +1396,15 @@ static void vmstat_update(struct work_struct *w) | |||
1396 | * Counters were updated so we expect more updates | 1396 | * Counters were updated so we expect more updates |
1397 | * to occur in the future. Keep on running the | 1397 | * to occur in the future. Keep on running the |
1398 | * update worker thread. | 1398 | * update worker thread. |
1399 | * If we were marked on cpu_stat_off clear the flag | ||
1400 | * so that vmstat_shepherd doesn't schedule us again. | ||
1399 | */ | 1401 | */ |
1400 | queue_delayed_work_on(smp_processor_id(), vmstat_wq, | 1402 | if (!cpumask_test_and_clear_cpu(smp_processor_id(), |
1401 | this_cpu_ptr(&vmstat_work), | 1403 | cpu_stat_off)) { |
1402 | round_jiffies_relative(sysctl_stat_interval)); | 1404 | queue_delayed_work_on(smp_processor_id(), vmstat_wq, |
1405 | this_cpu_ptr(&vmstat_work), | ||
1406 | round_jiffies_relative(sysctl_stat_interval)); | ||
1407 | } | ||
1403 | } else { | 1408 | } else { |
1404 | /* | 1409 | /* |
1405 | * We did not update any counters so the app may be in | 1410 | * We did not update any counters so the app may be in |
@@ -1417,18 +1422,6 @@ static void vmstat_update(struct work_struct *w) | |||
1417 | * until the diffs stay at zero. The function is used by NOHZ and can only be | 1422 | * until the diffs stay at zero. The function is used by NOHZ and can only be |
1418 | * invoked when tick processing is not active. | 1423 | * invoked when tick processing is not active. |
1419 | */ | 1424 | */ |
1420 | void quiet_vmstat(void) | ||
1421 | { | ||
1422 | if (system_state != SYSTEM_RUNNING) | ||
1423 | return; | ||
1424 | |||
1425 | do { | ||
1426 | if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off)) | ||
1427 | cancel_delayed_work(this_cpu_ptr(&vmstat_work)); | ||
1428 | |||
1429 | } while (refresh_cpu_vm_stats(false)); | ||
1430 | } | ||
1431 | |||
1432 | /* | 1425 | /* |
1433 | * Check if the diffs for a certain cpu indicate that | 1426 | * Check if the diffs for a certain cpu indicate that |
1434 | * an update is needed. | 1427 | * an update is needed. |
@@ -1452,6 +1445,30 @@ static bool need_update(int cpu) | |||
1452 | return false; | 1445 | return false; |
1453 | } | 1446 | } |
1454 | 1447 | ||
1448 | void quiet_vmstat(void) | ||
1449 | { | ||
1450 | if (system_state != SYSTEM_RUNNING) | ||
1451 | return; | ||
1452 | |||
1453 | /* | ||
1454 | * If we are already in hands of the shepherd then there | ||
1455 | * is nothing for us to do here. | ||
1456 | */ | ||
1457 | if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off)) | ||
1458 | return; | ||
1459 | |||
1460 | if (!need_update(smp_processor_id())) | ||
1461 | return; | ||
1462 | |||
1463 | /* | ||
1464 | * Just refresh counters and do not care about the pending delayed | ||
1465 | * vmstat_update. It doesn't fire that often to matter and canceling | ||
1466 | * it would be too expensive from this path. | ||
1467 | * vmstat_shepherd will take care about that for us. | ||
1468 | */ | ||
1469 | refresh_cpu_vm_stats(false); | ||
1470 | } | ||
1471 | |||
1455 | 1472 | ||
1456 | /* | 1473 | /* |
1457 | * Shepherd worker thread that checks the | 1474 | * Shepherd worker thread that checks the |
@@ -1469,18 +1486,25 @@ static void vmstat_shepherd(struct work_struct *w) | |||
1469 | 1486 | ||
1470 | get_online_cpus(); | 1487 | get_online_cpus(); |
1471 | /* Check processors whose vmstat worker threads have been disabled */ | 1488 | /* Check processors whose vmstat worker threads have been disabled */ |
1472 | for_each_cpu(cpu, cpu_stat_off) | 1489 | for_each_cpu(cpu, cpu_stat_off) { |
1473 | if (need_update(cpu) && | 1490 | struct delayed_work *dw = &per_cpu(vmstat_work, cpu); |
1474 | cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) | ||
1475 | |||
1476 | queue_delayed_work_on(cpu, vmstat_wq, | ||
1477 | &per_cpu(vmstat_work, cpu), 0); | ||
1478 | 1491 | ||
1492 | if (need_update(cpu)) { | ||
1493 | if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) | ||
1494 | queue_delayed_work_on(cpu, vmstat_wq, dw, 0); | ||
1495 | } else { | ||
1496 | /* | ||
1497 | * Cancel the work if quiet_vmstat has put this | ||
1498 | * cpu on cpu_stat_off because the work item might | ||
1499 | * be still scheduled | ||
1500 | */ | ||
1501 | cancel_delayed_work(dw); | ||
1502 | } | ||
1503 | } | ||
1479 | put_online_cpus(); | 1504 | put_online_cpus(); |
1480 | 1505 | ||
1481 | schedule_delayed_work(&shepherd, | 1506 | schedule_delayed_work(&shepherd, |
1482 | round_jiffies_relative(sysctl_stat_interval)); | 1507 | round_jiffies_relative(sysctl_stat_interval)); |
1483 | |||
1484 | } | 1508 | } |
1485 | 1509 | ||
1486 | static void __init start_shepherd_timer(void) | 1510 | static void __init start_shepherd_timer(void) |
@@ -1488,7 +1512,7 @@ static void __init start_shepherd_timer(void) | |||
1488 | int cpu; | 1512 | int cpu; |
1489 | 1513 | ||
1490 | for_each_possible_cpu(cpu) | 1514 | for_each_possible_cpu(cpu) |
1491 | INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu), | 1515 | INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), |
1492 | vmstat_update); | 1516 | vmstat_update); |
1493 | 1517 | ||
1494 | if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL)) | 1518 | if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL)) |