author     Mike Galbraith <efault@gmx.de>   2009-11-12 05:07:44 -0500
committer  Ingo Molnar <mingo@elte.hu>      2009-11-12 06:28:29 -0500
commit     055a00865dcfc8e61f3cbefbb879c9577bd36ae5 (patch)
tree       73d406a3e05b5226c8a83f9ce3f6d0c67a063cb7 /kernel/sched.c
parent     aa021baa3295fa6e3f367d80f8955dd5176656eb (diff)
sched: Fix/add missing update_rq_clock() calls
kthread_bind(), migrate_task() and sched_fork() were missing updates, and try_to_wake_up() was updating after having already used the stale clock. Aside from preventing potential latency hits, there's a side benefit in that early boot printk time stamps become monotonic.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1258020464.6491.2.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <new-submission>
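The ordering the patch enforces is that update_rq_clock() runs, with the runqueue lock held or interrupts disabled, before anything that consumes rq->clock, such as set_task_cpu(). Below is a minimal sketch of that pattern; move_task_to_cpu() is a hypothetical helper used only for illustration (it mirrors the kthread_bind() hunk in the diff) and is not part of the actual kernel code.

/*
 * Hypothetical helper, for illustration only: refresh the runqueue
 * clock under rq->lock so that set_task_cpu() (or any other consumer)
 * never reads a stale rq->clock value.
 */
static void move_task_to_cpu(struct rq *rq, struct task_struct *p,
			     unsigned int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	update_rq_clock(rq);	/* bring rq->clock up to date... */
	set_task_cpu(p, cpu);	/* ...before it is sampled */
	spin_unlock_irqrestore(&rq->lock, flags);
}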
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  17
1 file changed, 12 insertions, 5 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..701eca4958a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2017,6 +2017,7 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 	}
 
 	spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
 	set_task_cpu(p, cpu);
 	p->cpus_allowed = cpumask_of_cpu(cpu);
 	p->rt.nr_cpus_allowed = 1;
@@ -2115,6 +2116,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->se.on_rq && !task_running(rq, p)) {
+		update_rq_clock(rq);
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -2376,14 +2378,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	task_rq_unlock(rq, &flags);
 
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu)
+	if (cpu != orig_cpu) {
+		local_irq_save(flags);
+		rq = cpu_rq(cpu);
+		update_rq_clock(rq);
 		set_task_cpu(p, cpu);
-
+		local_irq_restore(flags);
+	}
 	rq = task_rq_lock(p, &flags);
 
-	if (rq != orig_rq)
-		update_rq_clock(rq);
-
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -2545,6 +2548,7 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
+	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2581,7 +2585,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #ifdef CONFIG_SMP
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
+	local_irq_save(flags);
+	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
+	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))