author     Vincent Guittot <vincent.guittot@linaro.org>  2018-02-14 10:26:46 -0500
committer  Ingo Molnar <mingo@kernel.org>                2018-03-09 01:59:28 -0500
commit     31e77c93e432dec79c7d90b888bbfc3652592741 (patch)
tree       6b556daaeb2373b3d60f771cf694f05e7e11af41
parent     47ea54121e46a685aa2320df8b0f71aaeedff23f (diff)
sched/fair: Update blocked load when newly idle
When a NEWLY_IDLE load balance is not triggered, we might still need to
update the blocked load. We can kick an ilb so an idle CPU will take care
of updating the blocked load, or we can try to update it locally before
entering idle. In the latter case, we reuse part of nohz_idle_balance().

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: brendan.jackman@arm.com
Cc: dietmar.eggemann@arm.com
Cc: morten.rasmussen@foss.arm.com
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1518622006-16089-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/sched/fair.c | 105
 1 file changed, 87 insertions(+), 18 deletions(-)
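
Before reading the diff, the new behaviour on the newly idle path can be
sketched as follows. This is a minimal, standalone userspace C model of the
decision flow only, not kernel code: update_blocked_load_locally() and
kick_ilb_stats() are invented stand-ins for
_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE) and
kick_ilb(NOHZ_STATS_KICK).

/*
 * Toy model of the decision flow added on the newly idle path.
 * Standalone userspace C, not kernel code: the two helpers below are
 * invented stand-ins for _nohz_idle_balance() and kick_ilb(); the
 * housekeeping, avg_idle and next_blocked bail-outs of the real patch
 * are omitted.
 */
#include <stdbool.h>
#include <stdio.h>

static bool nohz_has_blocked = true;	/* stands in for nohz.has_blocked */
static bool local_pass_complete;	/* did the local loop visit all idle CPUs? */

static bool update_blocked_load_locally(void)
{
	printf("newly idle CPU updates the blocked load itself\n");
	return local_pass_complete;
}

static void kick_ilb_stats(void)
{
	printf("kick a remote idle CPU to finish the update\n");
}

static void newidle_balance(void)
{
	if (!nohz_has_blocked)
		return;			/* nothing stale: enter idle right away */

	/* Try the update locally; fall back to a remote ilb kick. */
	if (!update_blocked_load_locally())
		kick_ilb_stats();
}

int main(void)
{
	local_pass_complete = false;	/* pretend the local loop aborted early */
	newidle_balance();
	return 0;
}

Running the pass locally first is cheap because this CPU is about to go idle
anyway; waking another idle CPU is kept only as the fallback for when the
local loop has to abort early (in the patch below, when this CPU gets work
to do and need_resched() fires).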
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 058badcfa94b..3582117e1580 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9375,10 +9375,14 @@ out:
 }
 
 /*
- * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
- * rebalancing for all the CPUs for whom scheduler ticks are stopped.
+ * Internal function that runs load balance for all idle cpus. The load balance
+ * can be a simple update of blocked load or a complete load balance with
+ * tasks movement depending of flags.
+ * The function returns false if the loop has stopped before running
+ * through all idle CPUs.
  */
-static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+			       enum cpu_idle_type idle)
 {
 	/* Earliest time when we have to do rebalance again */
 	unsigned long now = jiffies;
@@ -9386,20 +9390,10 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	bool has_blocked_load = false;
 	int update_next_balance = 0;
 	int this_cpu = this_rq->cpu;
-	unsigned int flags;
 	int balance_cpu;
+	int ret = false;
 	struct rq *rq;
 
-	if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
-		return false;
-
-	if (idle != CPU_IDLE) {
-		atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
-		return false;
-	}
-
-	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
-
 	SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
 
 	/*
@@ -9443,10 +9437,10 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 		if (time_after_eq(jiffies, rq->next_balance)) {
 			struct rq_flags rf;
 
-			rq_lock_irq(rq, &rf);
+			rq_lock_irqsave(rq, &rf);
 			update_rq_clock(rq);
 			cpu_load_update_idle(rq);
-			rq_unlock_irq(rq, &rf);
+			rq_unlock_irqrestore(rq, &rf);
 
 			if (flags & NOHZ_BALANCE_KICK)
 				rebalance_domains(rq, CPU_IDLE);
@@ -9458,13 +9452,21 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 		}
 	}
 
-	update_blocked_averages(this_cpu);
+	/* Newly idle CPU doesn't need an update */
+	if (idle != CPU_NEWLY_IDLE) {
+		update_blocked_averages(this_cpu);
+		has_blocked_load |= this_rq->has_blocked_load;
+	}
+
 	if (flags & NOHZ_BALANCE_KICK)
 		rebalance_domains(this_rq, CPU_IDLE);
 
 	WRITE_ONCE(nohz.next_blocked,
 		now + msecs_to_jiffies(LOAD_AVG_PERIOD));
 
+	/* The full idle balance loop has been done */
+	ret = true;
+
 abort:
 	/* There is still blocked load, enable periodic update */
 	if (has_blocked_load)
@@ -9478,15 +9480,79 @@ abort:
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 
+	return ret;
+}
+
+/*
+ * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
+ * rebalancing for all the cpus for whom scheduler ticks are stopped.
+ */
+static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+{
+	int this_cpu = this_rq->cpu;
+	unsigned int flags;
+
+	if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
+		return false;
+
+	if (idle != CPU_IDLE) {
+		atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
+		return false;
+	}
+
+	/*
+	 * barrier, pairs with nohz_balance_enter_idle(), ensures ...
+	 */
+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
+	if (!(flags & NOHZ_KICK_MASK))
+		return false;
+
+	_nohz_idle_balance(this_rq, flags, idle);
+
 	return true;
 }
+
+static void nohz_newidle_balance(struct rq *this_rq)
+{
+	int this_cpu = this_rq->cpu;
+
+	/*
+	 * This CPU doesn't want to be disturbed by scheduler
+	 * housekeeping
+	 */
+	if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
+		return;
+
+	/* Will wake up very soon. No time for doing anything else*/
+	if (this_rq->avg_idle < sysctl_sched_migration_cost)
+		return;
+
+	/* Don't need to update blocked load of idle CPUs*/
+	if (!READ_ONCE(nohz.has_blocked) ||
+	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
+		return;
+
+	raw_spin_unlock(&this_rq->lock);
+	/*
+	 * This CPU is going to be idle and blocked load of idle CPUs
+	 * need to be updated. Run the ilb locally as it is a good
+	 * candidate for ilb instead of waking up another idle CPU.
+	 * Kick an normal ilb if we failed to do the update.
+	 */
+	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
+		kick_ilb(NOHZ_STATS_KICK);
+	raw_spin_lock(&this_rq->lock);
+}
+
 #else /* !CONFIG_NO_HZ_COMMON */
 static inline void nohz_balancer_kick(struct rq *rq) { }
 
-static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 {
 	return false;
 }
+
+static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
@@ -9523,12 +9589,15 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 
 	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
 	    !this_rq->rd->overload) {
+
 		rcu_read_lock();
 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
 		if (sd)
 			update_next_balance(sd, &next_balance);
 		rcu_read_unlock();
 
+		nohz_newidle_balance(this_rq);
+
 		goto out;
 	}
 