diff options
-rw-r--r-- | kernel/sched/fair.c | 65 |
1 files changed, 37 insertions, 28 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0d2f8a352bf..51003e1c794d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9603,24 +9603,6 @@ static void nohz_balancer_kick(struct rq *rq)
9603 | } | 9603 | } |
9604 | 9604 | ||
9605 | rcu_read_lock(); | 9605 | rcu_read_lock(); |
9606 | sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); | ||
9607 | if (sds) { | ||
9608 | /* | ||
9609 | * If there is an imbalance between LLC domains (IOW we could | ||
9610 | * increase the overall cache use), we need some less-loaded LLC | ||
9611 | * domain to pull some load. Likewise, we may need to spread | ||
9612 | * load within the current LLC domain (e.g. packed SMT cores but | ||
9613 | * other CPUs are idle). We can't really know from here how busy | ||
9614 | * the others are - so just get a nohz balance going if it looks | ||
9615 | * like this LLC domain has tasks we could move. | ||
9616 | */ | ||
9617 | nr_busy = atomic_read(&sds->nr_busy_cpus); | ||
9618 | if (nr_busy > 1) { | ||
9619 | flags = NOHZ_KICK_MASK; | ||
9620 | goto unlock; | ||
9621 | } | ||
9622 | |||
9623 | } | ||
9624 | 9606 | ||
9625 | sd = rcu_dereference(rq->sd); | 9607 | sd = rcu_dereference(rq->sd); |
9626 | if (sd) { | 9608 | if (sd) { |
@@ -9635,6 +9617,21 @@ static void nohz_balancer_kick(struct rq *rq)
9635 | } | 9617 | } |
9636 | } | 9618 | } |
9637 | 9619 | ||
9620 | sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); | ||
9621 | if (sd) { | ||
9622 | /* | ||
9623 | * When ASYM_PACKING; see if there's a more preferred CPU | ||
9624 | * currently idle; in which case, kick the ILB to move tasks | ||
9625 | * around. | ||
9626 | */ | ||
9627 | for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { | ||
9628 | if (sched_asym_prefer(i, cpu)) { | ||
9629 | flags = NOHZ_KICK_MASK; | ||
9630 | goto unlock; | ||
9631 | } | ||
9632 | } | ||
9633 | } | ||
9634 | |||
9638 | sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); | 9635 | sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); |
9639 | if (sd) { | 9636 | if (sd) { |
9640 | /* | 9637 | /* |
@@ -9645,20 +9642,32 @@ static void nohz_balancer_kick(struct rq *rq)
9645 | flags = NOHZ_KICK_MASK; | 9642 | flags = NOHZ_KICK_MASK; |
9646 | goto unlock; | 9643 | goto unlock; |
9647 | } | 9644 | } |
9645 | |||
9646 | /* | ||
9647 | * For asymmetric systems, we do not want to nicely balance | ||
9648 | * cache use, instead we want to embrace asymmetry and only | ||
9649 | * ensure tasks have enough CPU capacity. | ||
9650 | * | ||
9651 | * Skip the LLC logic because it's not relevant in that case. | ||
9652 | */ | ||
9653 | goto unlock; | ||
9648 | } | 9654 | } |
9649 | 9655 | ||
9650 | sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); | 9656 | sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); |
9651 | if (sd) { | 9657 | if (sds) { |
9652 | /* | 9658 | /* |
9653 | * When ASYM_PACKING; see if there's a more preferred CPU | 9659 | * If there is an imbalance between LLC domains (IOW we could |
9654 | * currently idle; in which case, kick the ILB to move tasks | 9660 | * increase the overall cache use), we need some less-loaded LLC |
9655 | * around. | 9661 | * domain to pull some load. Likewise, we may need to spread |
9662 | * load within the current LLC domain (e.g. packed SMT cores but | ||
9663 | * other CPUs are idle). We can't really know from here how busy | ||
9664 | * the others are - so just get a nohz balance going if it looks | ||
9665 | * like this LLC domain has tasks we could move. | ||
9656 | */ | 9666 | */ |
9657 | for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { | 9667 | nr_busy = atomic_read(&sds->nr_busy_cpus); |
9658 | if (sched_asym_prefer(i, cpu)) { | 9668 | if (nr_busy > 1) { |
9659 | flags = NOHZ_KICK_MASK; | 9669 | flags = NOHZ_KICK_MASK; |
9660 | goto unlock; | 9670 | goto unlock; |
9661 | } | ||
9662 | } | 9671 | } |
9663 | } | 9672 | } |
9664 | unlock: | 9673 | unlock: |