author		Mike Galbraith <efault@gmx.de>	2008-12-16 02:45:30 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-16 03:45:38 -0500
commit		03e89e4574a680af15f59329b061f35d9813aff4 (patch)
tree		2cf03e33021fbf8cf2aaf6ce99314eb45ed34dbc /kernel
parent		d65bd5ecb2bd166cea4952a59b7e16cc3ad6ef6c (diff)
sched: fix wakeup preemption clock
Impact: sharpen the wakeup-granularity to always be against current scheduler time
It was possible to do the preemption check against an old time stamp.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
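The ordering issue is easiest to see in a condensed, self-contained sketch. This is not kernel code: struct rq, struct task, update_rq_clock(), update_curr() and should_preempt() below are simplified stand-ins that only mimic the problem the patch addresses, namely deciding wakeup preemption before the runqueue clock and the current task's accounting have been refreshed.

#include <stdio.h>

struct rq   { unsigned long long clock; };
struct task { unsigned long long vruntime; };

/* stand-in for update_rq_clock(): refresh the per-runqueue clock */
static void update_rq_clock(struct rq *rq, unsigned long long now)
{
	rq->clock = now;
}

/* stand-in for update_curr(): charge the running task up to rq->clock */
static void update_curr(struct rq *rq, struct task *curr,
			unsigned long long *last_update)
{
	curr->vruntime += rq->clock - *last_update;
	*last_update = rq->clock;
}

/* wakeup preemption test against a ~1000-unit wakeup granularity */
static int should_preempt(const struct task *curr, const struct task *wakee)
{
	return wakee->vruntime + 1000 < curr->vruntime;
}

int main(void)
{
	struct rq rq = { .clock = 0 };
	struct task curr = { .vruntime = 0 }, wakee = { .vruntime = 500 };
	unsigned long long last_update = 0;

	/* old ordering: check first, against whatever the clock last was */
	int stale = should_preempt(&curr, &wakee);

	/* new ordering: refresh clock and accounting, then check */
	update_rq_clock(&rq, 5000);
	update_curr(&rq, &curr, &last_update);
	int fresh = should_preempt(&curr, &wakee);

	printf("stale clock: preempt=%d  fresh clock: preempt=%d\n",
	       stale, fresh);
	return 0;
}

With a stale clock the running task appears to have consumed almost no runtime since the last update, so the wakee never clears the granularity test; refreshing the clock and running the accounting first makes the comparison use current scheduler time, which is the ordering the patch below puts in place.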
Diffstat (limited to 'kernel')
 kernel/sched.c      | 2 +-
 kernel/sched_fair.c | 7 +++----
 2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ad7b93be5691..88215066efae 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,6 +2266,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
+	update_rq_clock(rq);
 	old_state = p->state;
 	if (!(old_state & state))
 		goto out;
@@ -2323,7 +2324,6 @@ out_activate:
 		schedstat_inc(p, se.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.nr_wakeups_remote);
-	update_rq_clock(rq);
 	activate_task(rq, p, 1);
 	success = 1;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 98345e45b059..928cd74cff0d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1345,12 +1345,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se, *pse = &p->se;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 
-	if (unlikely(rt_prio(p->prio))) {
-		struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	update_curr(cfs_rq);
 
-		update_rq_clock(rq);
-		update_curr(cfs_rq);
+	if (unlikely(rt_prio(p->prio))) {
 		resched_task(curr);
 		return;
 	}