| author | Viresh Kumar <viresh.kumar@linaro.org> | 2018-04-26 06:30:50 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-05-04 04:00:07 -0400 |
| commit | f1d88b4468188ddcd2620b8d612068faf6662a62 (patch) | |
| tree | 7f9ee18eb9d6e7974e608112a151a37837e42e17 /kernel/sched | |
| parent | b5bf9a90bbebffba888c9144c5a8a10317b04064 (diff) | |
sched/fair: Rearrange select_task_rq_fair() to optimize it
Rearrange select_task_rq_fair() a bit to avoid executing some
conditional statements in a few specific code paths. That gets rid of
the goto as well; the resulting shape of the function is sketched after
the diff below.

This shouldn't result in any functional changes.
Tested-by: Rohit Jain <rohit.k.jain@oracle.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/20831b8d237bf3a20e4e328286f678b425ff04c9.1524738578.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')

-rw-r--r--  kernel/sched/fair.c | 37
1 file changed, 16 insertions, 21 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e3002e5ada31..4b346f358005 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6613,7 +6613,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int
 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 {
-	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
+	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
 	int new_cpu = prev_cpu;
 	int want_affine = 0;
@@ -6636,7 +6636,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		 */
 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
-			affine_sd = tmp;
+			if (cpu != prev_cpu)
+				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
+
+			sd = NULL; /* Prefer wake_affine over balance flags */
 			break;
 		}
 
@@ -6646,33 +6649,25 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 			break;
 	}
 
-	if (affine_sd) {
-		sd = NULL; /* Prefer wake_affine over balance flags */
-		if (cpu == prev_cpu)
-			goto pick_cpu;
-
-		new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, sync);
-	}
+	if (unlikely(sd)) {
+		/* Slow path */
 
-	if (sd && !(sd_flag & SD_BALANCE_FORK)) {
 		/*
 		 * We're going to need the task's util for capacity_spare_wake
 		 * in find_idlest_group. Sync it up to prev_cpu's
 		 * last_update_time.
 		 */
-		sync_entity_load_avg(&p->se);
-	}
-
-	if (!sd) {
-pick_cpu:
-		if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
-			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+		if (!(sd_flag & SD_BALANCE_FORK))
+			sync_entity_load_avg(&p->se);
 
-			if (want_affine)
-				current->recent_used_cpu = cpu;
-		}
-	} else {
 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
+	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
+		/* Fast path */
+
+		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+
+		if (want_affine)
+			current->recent_used_cpu = cpu;
 	}
 	rcu_read_unlock();
 
```
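For readability, here is how select_task_rq_fair() reads once the patch is applied. The body below is assembled from the new side of the diff above together with its context lines; the parts the patch does not touch (the want_affine computation, the exact sync condition, and the rcu_read_lock() that pairs with the unlock) are abridged to comments, so treat this as a reading aid rather than a verbatim copy of fair.c:

```c
static int
select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *sd = NULL;
	int cpu = smp_processor_id();
	int new_cpu = prev_cpu;
	int want_affine = 0;
	int sync = wake_flags & WF_SYNC; /* abridged: see fair.c for the exact condition */

	/* ... want_affine computation and rcu_read_lock() elided ... */

	for_each_domain(cpu, tmp) {
		/*
		 * If both 'cpu' and 'prev_cpu' are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			/* The affine decision is now taken inside the domain walk: */
			if (cpu != prev_cpu)
				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);

			sd = NULL; /* Prefer wake_affine over balance flags */
			break;
		}

		if (tmp->flags & sd_flag)
			sd = tmp;
		else if (!want_affine)
			break;
	}

	if (unlikely(sd)) {
		/* Slow path */

		/*
		 * We're going to need the task's util for capacity_spare_wake
		 * in find_idlest_group. Sync it up to prev_cpu's
		 * last_update_time.
		 */
		if (!(sd_flag & SD_BALANCE_FORK))
			sync_entity_load_avg(&p->se);

		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
		/* Fast path */

		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);

		if (want_affine)
			current->recent_used_cpu = cpu;
	}
	rcu_read_unlock();

	return new_cpu;
}
```

With wake_affine() handled during the domain walk itself, only two outcomes remain once the loop exits: either a balance domain was found (slow path, find_idlest_cpu()) or it was not (fast path, select_idle_sibling()), which is exactly what lets the old pick_cpu label and its goto disappear.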