Diffstat (limited to 'kernel')

 kernel/sched.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ccd91e5b65b..afc59f274e58 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2017,6 +2017,8 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 			spin_lock(&rq1->lock);
 		}
 	}
+	update_rq_clock(rq1);
+	update_rq_clock(rq2);
 }
 
 /*
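With this hunk, every double_rq_lock() caller gets both runqueue clocks
refreshed as a side effect of taking the locks. A minimal sketch of the
post-patch shape, with the lock-ordering details elided (they are not
touched by this patch):

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	/* ... acquire rq1->lock and rq2->lock in a fixed order ... */

	/* both locks held: bring both clocks up to date for the caller */
	update_rq_clock(rq1);
	update_rq_clock(rq2);
}
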
@@ -2113,10 +2115,8 @@ void sched_exec(void)
 static void pull_task(struct rq *src_rq, struct task_struct *p,
 		      struct rq *this_rq, int this_cpu)
 {
-	update_rq_clock(src_rq);
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	__update_rq_clock(this_rq);
 	activate_task(this_rq, p, 0);
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
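pull_task() therefore stops updating clocks itself and relies on its
caller having refreshed both runqueues when the locks were taken. An
illustrative caller-side sketch of that contract (the surrounding
balancing logic here is assumed, not part of this patch):

/* this_rq->clock is assumed fresh; take both locks, update the remote clock */
double_lock_balance(this_rq, src_rq);
update_rq_clock(src_rq);
/* both clocks current before the dequeue/enqueue pair inside pull_task() */
pull_task(src_rq, p, this_rq, this_cpu);
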
@@ -2798,6 +2798,8 @@ redo:
 	if (busiest->nr_running > 1) {
 		/* Attempt to move tasks */
 		double_lock_balance(this_rq, busiest);
+		/* this_rq->clock is already updated */
+		update_rq_clock(busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 				      imbalance, sd, CPU_NEWLY_IDLE,
 				      &all_pinned);
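Only busiest needs an explicit update here: on this CPU_NEWLY_IDLE path,
this_rq->clock was already refreshed earlier by the caller, which is what
the added comment records. The invariant, spelled out as a sketch (the
early update is inferred from that comment, not shown in this hunk):

update_rq_clock(this_rq);		/* done earlier on the newly-idle path */
/* ... find the busiest runqueue ... */
double_lock_balance(this_rq, busiest);	/* takes both locks, no clock update */
update_rq_clock(busiest);		/* only the remote clock is stale */
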
@@ -2895,6 +2897,8 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 
 	/* move a task from busiest_rq to target_rq */
 	double_lock_balance(busiest_rq, target_rq);
+	update_rq_clock(busiest_rq);
+	update_rq_clock(target_rq);
 
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
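double_lock_balance() itself is left untouched by this patch, so callers
that go on to move tasks refresh the clocks explicitly right after
locking, as active_load_balance() does here. Condensed, the pattern is:

double_lock_balance(busiest_rq, target_rq);	/* locking only, no clock side effect */
update_rq_clock(busiest_rq);			/* refresh both before moving anything */
update_rq_clock(target_rq);
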
@@ -4962,13 +4966,11 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		goto out;
 
 	on_rq = p->se.on_rq;
-	if (on_rq) {
-		update_rq_clock(rq_src);
+	if (on_rq)
 		deactivate_task(rq_src, p, 0);
-	}
+
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
-		update_rq_clock(rq_dest);
 		activate_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p);
 	}
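The removals in __migrate_task() lean on the first hunk: assuming the
function takes both runqueue locks via double_rq_lock(), as it does in this
era (the call sits above the shown context), both clocks are now refreshed
up front, so the per-callsite updates become redundant and the first
if (on_rq) block collapses to a single statement. A sketch of the prologue
this relies on:

double_rq_lock(rq_src, rq_dest);	/* now updates both clocks (first hunk) */
/* ... already-moved and affinity checks, then the shown code ... */
on_rq = p->se.on_rq;
if (on_rq)
	deactivate_task(rq_src, p, 0);	/* rq_src->clock is already fresh */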