Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       | 8
-rw-r--r--  kernel/sched_fair.c  | 9
2 files changed, 13 insertions(+), 4 deletions(-)
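The patch below switches callers from the old __rq_clock() helper, which advanced the runqueue clock and returned its value in one call, to calling __update_rq_clock() and then reading rq->clock directly. What follows is a minimal userspace sketch of that calling convention, not kernel code: the struct layout, the sketch_* names and the clock_gettime()-based time source are illustrative assumptions, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct rq {
	uint64_t clock;			/* per-runqueue clock, in nanoseconds */
};

/* hypothetical stand-in for the kernel's clock source */
static uint64_t sketch_sched_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* new-style helper: only refreshes rq->clock, returns nothing */
static void sketch_update_rq_clock(struct rq *rq)
{
	rq->clock = sketch_sched_clock();
}

int main(void)
{
	struct rq rq = { 0 };
	uint64_t now;

	sketch_update_rq_clock(&rq);	/* update the clock first... */
	now = rq.clock;			/* ...then read the field, as the patched callers do */
	printf("now = %llu ns\n", (unsigned long long)now);
	return 0;
}

One plausible reason for the split is that an explicit __update_rq_clock() call lets a caller refresh the clock once and reuse rq->clock for everything done under the same rq->lock hold, instead of hiding the update inside a value-returning helper.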
diff --git a/kernel/sched.c b/kernel/sched.c
index 893211054790..d67345175179 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1967,9 +1967,12 @@ static void update_cpu_load(struct rq *this_rq)
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load = total_load;
 	struct load_stat *ls = &this_rq->ls;
-	u64 now = __rq_clock(this_rq);
+	u64 now;
 	int i, scale;
 
+	__update_rq_clock(this_rq);
+	now = this_rq->clock;
+
 	this_rq->nr_load_updates++;
 	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
 		goto do_avg;
@@ -3458,7 +3461,8 @@ need_resched_nonpreemptible:
 
 	spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
-	now = __rq_clock(rq);
+	__update_rq_clock(rq);
+	now = rq->clock;
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 969f08c8bd34..bd20fad3deff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -672,7 +672,10 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct sched_entity *next;
-	u64 now = __rq_clock(rq);
+	u64 now;
+
+	__update_rq_clock(rq);
+	now = rq->clock;
 
 	/*
 	 * Dequeue and enqueue the task to update its
@@ -824,8 +827,10 @@ dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep, u64 now)
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	u64 now = __rq_clock(rq);
+	u64 now;
 
+	__update_rq_clock(rq);
+	now = rq->clock;
 	/*
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree: