Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6777dc7942a0..1b13c8e1cfc2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -82,6 +82,8 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+static void litmus_tick(struct rq*, struct task_struct*);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -576,8 +578,14 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
+	/* LITMUS^RT: turning off the clock update is buggy in Linux 2.6.36;
+	 * the scheduler can "forget" to re-enable the runqueue clock in some
+	 * cases. LITMUS^RT amplifies the effects of this problem. Hence, we
+	 * turn it off to avoid stalling clocks. */
+	/*
 	if (test_tsk_need_resched(p))
 		rq->skip_clock_update = 1;
+	*/
 }
 
 static inline int cpu_of(struct rq *rq)
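Some context on why commenting the optimization out is the safe choice: in 2.6.36, setting rq->skip_clock_update causes the next update_rq_clock() call to be skipped entirely, on the assumption that schedule() will refresh the clock moments later and clear the flag. The mechanism being disabled looks roughly like this (paraphrased from 2.6.36, not part of this diff):

	/* Paraphrase of the 2.6.36 mechanism that this hunk disables: when
	 * skip_clock_update is set, the clock read is elided; the flag is
	 * only cleared again on the schedule() path. */
	inline void update_rq_clock(struct rq *rq)
	{
		if (!rq->skip_clock_update)
			rq->clock = sched_clock_cpu(cpu_of(rq));
	}

If the clearing path is ever missed, rq->clock stops advancing and every consumer of the runqueue clock, including LITMUS^RT budget accounting, operates on a stale timestamp. Leaving the flag permanently unset trades a redundant clock read for correctness.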
@@ -1052,6 +1060,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
+	litmus_tick(rq, rq->curr);
 	raw_spin_unlock(&rq->lock);
 
 	return HRTIMER_NORESTART;
@@ -3791,6 +3800,7 @@ asmlinkage void __sched schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
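sched_state_entered_schedule() belongs to the LITMUS^RT scheduling-state machine, which tracks, per CPU, whether a scheduling decision is in flight so that preemption requests from remote CPUs are not lost between picking a task and switching to it. Its definition is not in this diff; a self-contained sketch of one way such an entry point could look, with the state names and the per-CPU variable assumed purely for illustration:

	/* Illustrative per-CPU scheduler-state word; names and encoding are
	 * assumptions, not taken from this patch. */
	typedef enum {
		TASK_SCHEDULED  = (1 << 0), /* no rescheduling pending           */
		SHOULD_SCHEDULE = (1 << 1), /* a remote preemption was requested */
		WILL_SCHEDULE   = (1 << 2), /* schedule() is currently running   */
	} sched_state_t;

	static DEFINE_PER_CPU(atomic_t, resched_state);

	static inline void set_sched_state(sched_state_t s)
	{
		atomic_set(&__get_cpu_var(resched_state), s);
	}

	/* Called right after preempt_disable() in schedule(): announce that
	 * this CPU is now computing a scheduling decision. */
	static inline void sched_state_entered_schedule(void)
	{
		set_sched_state(WILL_SCHEDULE);
	}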
@@ -3869,7 +3879,7 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(prev)))
+	if (sched_state_validate_switch() || unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
 
 	preempt_enable_no_resched();
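The new sched_state_validate_switch() check closes the race window on the way out of schedule(): if a remote CPU requested a preemption while the local decision was being computed, the decision is stale and schedule() must branch back to need_resched_nonpreemptible and pick again. Note the ordering, too: the validation is the left operand of ||, so it runs on every pass, not only when the kernel lock needs to be reacquired. Continuing the assumed state machine from the sketch above (again illustrative, not the code from this patch series):

	/* Atomically try to retire the in-flight decision. If the state is
	 * still WILL_SCHEDULE, nobody interfered and the switch is valid.
	 * If a remote CPU moved us to SHOULD_SCHEDULE in the meantime, the
	 * picked task may no longer be the right one: re-arm the state and
	 * report failure so that schedule() loops. */
	static inline int sched_state_validate_switch(void)
	{
		atomic_t *state = &__get_cpu_var(resched_state);

		if (atomic_cmpxchg(state, WILL_SCHEDULE, TASK_SCHEDULED)
		    == WILL_SCHEDULE)
			return 0;	/* decision still valid */

		set_sched_state(WILL_SCHEDULE);
		return 1;	/* stale: schedule again */
	}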