author		Valentin Schneider <valentin.schneider@arm.com>	2019-02-11 12:59:46 -0500
committer	Ingo Molnar <mingo@kernel.org>	2019-03-19 07:06:15 -0400
commit		b9a7b8831600afc51c9ba52c05f12db2266f01c7
tree		6ba43d41cae48073c0fa5936e14b555214bafb6d /kernel/sched
parent		a0fe2cf086aef213d1b4bca1b1291a3dee8357c9
sched/fair: Skip LLC NOHZ logic for asymmetric systems
The LLC NOHZ condition will become true as soon as >=2 CPUs in a single
LLC domain are busy. On big.LITTLE systems, this translates to two or
more CPUs of a "cluster" (big or LITTLE) being busy.

Issuing a NOHZ kick in these conditions isn't desired for asymmetric
systems, as if the busy CPUs can provide enough compute capacity to the
running tasks, then we can leave the NOHZ CPUs in peace.

Skip the LLC NOHZ condition for asymmetric systems, and rely on
nr_running & capacity checks to trigger NOHZ kicks when the system
actually needs them.

Suggested-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dietmar.Eggemann@arm.com
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: vincent.guittot@linaro.org
Link: https://lkml.kernel.org/r/20190211175946.4961-4-valentin.schneider@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
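[Editor's note: the "LLC NOHZ condition" above reduces to one comparison
against a per-LLC busy counter. A minimal userspace sketch of just that
check follows; struct llc_shared and llc_nohz_condition are illustrative
stand-ins for the kernel's sched_domain_shared and the test in
nohz_balancer_kick(), not kernel code.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct sched_domain_shared. */
struct llc_shared {
	atomic_int nr_busy_cpus;	/* busy CPUs in this LLC domain */
};

/*
 * Pre-patch LLC NOHZ condition: true as soon as >=2 CPUs in a single
 * LLC domain are busy. On big.LITTLE, each cluster is its own LLC
 * domain, so two busy CPUs of one cluster already satisfy it.
 */
static bool llc_nohz_condition(struct llc_shared *sds)
{
	return atomic_load(&sds->nr_busy_cpus) > 1;
}

int main(void)
{
	struct llc_shared little_cluster = { .nr_busy_cpus = 2 };

	/* Prints 1: two busy LITTLEs would already trigger a NOHZ kick. */
	printf("kick: %d\n", llc_nohz_condition(&little_cluster));
	return 0;
}

This is the kick the patch suppresses on asymmetric systems: if those two
busy CPUs have enough capacity for their tasks, waking a nohz-idle CPU
gains nothing.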
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0d2f8a352bf..51003e1c794d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9603,24 +9603,6 @@ static void nohz_balancer_kick(struct rq *rq)
 	}
 
 	rcu_read_lock();
-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-	if (sds) {
-		/*
-		 * If there is an imbalance between LLC domains (IOW we could
-		 * increase the overall cache use), we need some less-loaded LLC
-		 * domain to pull some load. Likewise, we may need to spread
-		 * load within the current LLC domain (e.g. packed SMT cores but
-		 * other CPUs are idle). We can't really know from here how busy
-		 * the others are - so just get a nohz balance going if it looks
-		 * like this LLC domain has tasks we could move.
-		 */
-		nr_busy = atomic_read(&sds->nr_busy_cpus);
-		if (nr_busy > 1) {
-			flags = NOHZ_KICK_MASK;
-			goto unlock;
-		}
-
-	}
 
 	sd = rcu_dereference(rq->sd);
 	if (sd) {
@@ -9635,6 +9617,21 @@ static void nohz_balancer_kick(struct rq *rq)
 		}
 	}
 
+	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
+	if (sd) {
+		/*
+		 * When ASYM_PACKING; see if there's a more preferred CPU
+		 * currently idle; in which case, kick the ILB to move tasks
+		 * around.
+		 */
+		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
+			if (sched_asym_prefer(i, cpu)) {
+				flags = NOHZ_KICK_MASK;
+				goto unlock;
+			}
+		}
+	}
+
 	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
 	if (sd) {
 		/*
@@ -9645,20 +9642,32 @@ static void nohz_balancer_kick(struct rq *rq)
 			flags = NOHZ_KICK_MASK;
 			goto unlock;
 		}
+
+		/*
+		 * For asymmetric systems, we do not want to nicely balance
+		 * cache use, instead we want to embrace asymmetry and only
+		 * ensure tasks have enough CPU capacity.
+		 *
+		 * Skip the LLC logic because it's not relevant in that case.
+		 */
+		goto unlock;
 	}
 
-	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
-	if (sd) {
+	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+	if (sds) {
 		/*
-		 * When ASYM_PACKING; see if there's a more preferred CPU
-		 * currently idle; in which case, kick the ILB to move tasks
-		 * around.
+		 * If there is an imbalance between LLC domains (IOW we could
+		 * increase the overall cache use), we need some less-loaded LLC
+		 * domain to pull some load. Likewise, we may need to spread
+		 * load within the current LLC domain (e.g. packed SMT cores but
+		 * other CPUs are idle). We can't really know from here how busy
+		 * the others are - so just get a nohz balance going if it looks
+		 * like this LLC domain has tasks we could move.
 		 */
-		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
-			if (sched_asym_prefer(i, cpu)) {
-				flags = NOHZ_KICK_MASK;
-				goto unlock;
-			}
+		nr_busy = atomic_read(&sds->nr_busy_cpus);
+		if (nr_busy > 1) {
+			flags = NOHZ_KICK_MASK;
+			goto unlock;
 		}
 	}
 unlock:
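[Editor's note: the reordering is spread across three hunks, so the
resulting decision order is hard to read from the diff alone. Below is a
runnable userspace simulation of the post-patch check order in
nohz_balancer_kick(); the struct fields and function name are
illustrative assumptions, the rq->sd checks are elided, and only the
ordering and the early bail-out mirror the patched code.]

#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	bool asym_packing_pref;	/* an idle, more-preferred CPU exists */
	bool asym_capacity;	/* sd_asym_cpucapacity exists for this CPU */
	bool misfit_task;	/* a task is too big for its current CPU */
	int  llc_nr_busy;	/* busy CPUs in this CPU's LLC domain */
};

static bool nohz_kick_needed(const struct cpu_state *c)
{
	/* rq->sd checks (unchanged by this patch) elided. */

	/* ASYM_PACKING now runs before the LLC check. */
	if (c->asym_packing_pref)
		return true;

	/* Asymmetric capacity: kick only for a misfit task, then bail out... */
	if (c->asym_capacity)
		return c->misfit_task;	/* ...so the LLC logic below is skipped */

	/* LLC condition, now last: reached only on symmetric systems. */
	return c->llc_nr_busy > 1;
}

int main(void)
{
	struct cpu_state big_little = { .asym_capacity = true, .llc_nr_busy = 2 };
	struct cpu_state smp = { .llc_nr_busy = 2 };

	printf("big.LITTLE, 2 busy in one cluster -> kick: %d\n",
	       nohz_kick_needed(&big_little));	/* 0: left in peace */
	printf("SMP, 2 busy in one LLC domain    -> kick: %d\n",
	       nohz_kick_needed(&smp));		/* 1: nr_busy > 1 fires */
	return 0;
}

Note the unconditional goto unlock at the end of the sd_asym_cpucapacity
block in the patch: it is what guarantees an asymmetric system never
falls through to the LLC check, which the simulation models as an early
return.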