-rw-r--r--	kernel/sched.c	| 49 ++++++++++++++++++++++++++++++++++---------------
-rw-r--r--	kernel/sched_fair.c	|  8 ++++++--
2 files changed, 40 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d613723f324f..fe3c152d0c68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -927,7 +927,10 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	u64 now = rq_clock(rq);
+	u64 now;
+
+	update_rq_clock(rq);
+	now = rq->clock;
 
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
@@ -941,7 +944,10 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
-	u64 now = rq_clock(rq);
+	u64 now;
+
+	update_rq_clock(rq);
+	now = rq->clock;
 
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
@@ -1664,7 +1670,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	this_cpu = smp_processor_id(); /* parent's CPU */
-	now = rq_clock(rq);
+	update_rq_clock(rq);
+	now = rq->clock;
 
 	p->prio = effective_prio(p);
 
@@ -2134,7 +2141,8 @@ void sched_exec(void)
 static void pull_task(struct rq *src_rq, struct task_struct *p,
 		      struct rq *this_rq, int this_cpu)
 {
-	deactivate_task(src_rq, p, 0, rq_clock(src_rq));
+	update_rq_clock(src_rq);
+	deactivate_task(src_rq, p, 0, src_rq->clock);
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
 	/*
@@ -3221,7 +3229,8 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime;
 	if (rq->curr == p) {
-		delta_exec = rq_clock(rq) - p->se.exec_start;
+		update_rq_clock(rq);
+		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
 			ns += delta_exec;
 	}
@@ -3919,7 +3928,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
-	now = rq_clock(rq);
+	update_rq_clock(rq);
+	now = rq->clock;
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
@@ -3966,7 +3976,8 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
 	rq = task_rq_lock(p, &flags);
-	now = rq_clock(rq);
+	update_rq_clock(rq);
+	now = rq->clock;
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -4228,8 +4239,10 @@ recheck:
 		goto recheck;
 	}
 	on_rq = p->se.on_rq;
-	if (on_rq)
-		deactivate_task(rq, p, 0, rq_clock(rq));
+	if (on_rq) {
+		update_rq_clock(rq);
+		deactivate_task(rq, p, 0, rq->clock);
+	}
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
 	if (on_rq) {
@@ -4981,8 +4994,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		goto out;
 
 	on_rq = p->se.on_rq;
-	if (on_rq)
-		deactivate_task(rq_src, p, 0, rq_clock(rq_src));
+	if (on_rq) {
+		update_rq_clock(rq_src);
+		deactivate_task(rq_src, p, 0, rq_src->clock);
+	}
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
 		activate_task(rq_dest, p, 0);
@@ -5215,7 +5230,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 	for ( ; ; ) {
 		if (!rq->nr_running)
 			break;
-		next = pick_next_task(rq, rq->curr, rq_clock(rq));
+		update_rq_clock(rq);
+		next = pick_next_task(rq, rq->curr, rq->clock);
 		if (!next)
 			break;
 		migrate_dead(dead_cpu, next);
@@ -5400,7 +5416,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
-		deactivate_task(rq, rq->idle, 0, rq_clock(rq));
+		update_rq_clock(rq);
+		deactivate_task(rq, rq->idle, 0, rq->clock);
 		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
@@ -6638,8 +6655,10 @@ void normalize_rt_tasks(void)
 #endif
 
 		on_rq = p->se.on_rq;
-		if (on_rq)
-			deactivate_task(task_rq(p), p, 0, rq_clock(task_rq(p)));
+		if (on_rq) {
+			update_rq_clock(task_rq(p));
+			deactivate_task(task_rq(p), p, 0, task_rq(p)->clock);
+		}
 		__setscheduler(rq, p, SCHED_NORMAL, 0);
 		if (on_rq) {
 			activate_task(task_rq(p), p, 0);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 923bed0b0c42..969f08c8bd34 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -844,7 +844,8 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
 	unsigned long gran;
 
 	if (unlikely(rt_prio(p->prio))) {
-		update_curr(cfs_rq, rq_clock(rq));
+		update_rq_clock(rq);
+		update_curr(cfs_rq, rq->clock);
 		resched_task(curr);
 		return;
 	}
@@ -1063,9 +1064,12 @@ static void set_curr_task_fair(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se;
-	u64 now = rq_clock(rq);
+	u64 now;
 	struct cfs_rq *cfs_rq;
 
+	update_rq_clock(rq);
+	now = rq->clock;
+
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		set_next_entity(cfs_rq, se, now);
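
The conversion is identical at every call site: the old rq_clock() helper is replaced one-for-one by an explicit update_rq_clock() followed by a plain read of the rq->clock field, so the clock update becomes visible in the caller and several reads inside one locked section can observe the same timestamp. Below is a minimal userspace sketch of that pattern, not the kernel code: struct rq is trimmed to the one field that matters here, and sched_clock() is faked with a counter so the example compiles and runs standalone.

#include <stdio.h>

typedef unsigned long long u64;

struct rq {
	u64 clock;			/* last sampled time, in "nanoseconds" */
};

/* stand-in for the kernel's per-CPU sched_clock() */
static u64 sched_clock(void)
{
	static u64 fake_ns;
	return fake_ns += 1000;		/* advances 1us per call */
}

/* old style: every call re-samples, so two reads in one section disagree */
static u64 rq_clock(struct rq *rq)
{
	rq->clock = sched_clock();
	return rq->clock;
}

/* new style: one explicit update; later rq->clock reads stay consistent */
static void update_rq_clock(struct rq *rq)
{
	rq->clock = sched_clock();
}

int main(void)
{
	struct rq rq = { 0 };

	u64 a = rq_clock(&rq);		/* old: 1000 */
	u64 b = rq_clock(&rq);		/* old: 2000, already drifted */
	printf("rq_clock():        %llu vs %llu\n", a, b);

	update_rq_clock(&rq);		/* new: update once, read many times */
	printf("update_rq_clock(): %llu vs %llu\n", rq.clock, rq.clock);
	return 0;
}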