 kernel/sched.c | 6 ++++++
 1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 60fbae0c747c..1b13c8e1cfc2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -578,8 +578,14 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
+	/* LITMUS^RT: turning off the clock update is buggy in Linux 2.6.36;
+	 * the scheduler can "forget" to re-enable the runqueue clock in some
+	 * cases. LITMUS^RT amplifies the effects of this problem. Hence, we
+	 * disable this optimization to avoid stalling clocks. */
+	/*
 	if (test_tsk_need_resched(p))
 		rq->skip_clock_update = 1;
+	*/
 }
 
 static inline int cpu_of(struct rq *rq)
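
Background on the hazard the comment describes: rq->skip_clock_update tells update_rq_clock() to skip refreshing rq->clock, on the assumption that schedule() will run shortly and clear the flag again. The following stand-alone sketch is a toy user-space model, not the kernel code; struct fields mirror their kernel namesakes and fake_sched_clock_cpu() is invented for illustration. It shows how a flag that is set but never cleared leaves the runqueue clock stalled, which is the failure mode the LITMUS^RT comment refers to.

/* Toy model of the skip_clock_update hazard (not kernel code). */
#include <stdio.h>

struct rq {
	unsigned long long clock;	/* models rq->clock */
	int skip_clock_update;		/* models rq->skip_clock_update */
};

/* Stand-in for sched_clock_cpu(): time always moves forward. */
static unsigned long long fake_sched_clock_cpu(void)
{
	static unsigned long long now;
	return now += 1000;
}

/* Same guard as the 2.6.36 scheduler: skip the refresh if flagged. */
static void update_rq_clock(struct rq *rq)
{
	if (!rq->skip_clock_update)
		rq->clock = fake_sched_clock_cpu();
}

int main(void)
{
	struct rq rq = { 0, 0 };

	update_rq_clock(&rq);			/* clock advances normally */
	printf("clock = %llu\n", rq.clock);

	rq.skip_clock_update = 1;		/* check_preempt_curr() sets the flag ... */
	update_rq_clock(&rq);			/* ... but schedule() never clears it, */
	update_rq_clock(&rq);			/* so subsequent updates are skipped */
	printf("clock = %llu (stalled)\n", rq.clock);

	return 0;
}

Commenting out the flag-setting code, as the patch above does, forgoes the "save a back to back clock update" optimization but guarantees that rq->clock keeps advancing on every update.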