From 15a3dd60f0dc56db35d3335e7ea0ea8ab67eedfb Mon Sep 17 00:00:00 2001
From: Bjoern Brandenburg
Date: Tue, 25 Jun 2013 07:30:56 +0200
Subject: Integrate preemption state machine with Linux scheduler

Track when a processor is going to schedule "soon".
---
 kernel/sched/core.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a51ce671fd2..b073ffda2bf2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1960,8 +1960,12 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq;
+
+	preempt_disable();
+
+	rq = this_rq();
 
 	finish_task_switch(rq, prev);
 
 	/*
@@ -1970,6 +1974,11 @@ asmlinkage void schedule_tail(struct task_struct *prev)
 	 */
 	post_schedule(rq);
 
+	if (sched_state_validate_switch())
+		litmus_reschedule_local();
+
+	preempt_enable();
+
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
 	preempt_enable();
@@ -2967,11 +2976,16 @@ static void __sched __schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+	/* LITMUS^RT: quickly re-evaluate the scheduling decision
+	 * if the previous one is no longer valid after context switch.
+	 */
+litmus_need_resched_nonpreemptible:
 	TS_SCHED_START;
 
 	schedule_debug(prev);
@@ -3041,6 +3055,11 @@ need_resched:
 
 	post_schedule(rq);
 
+	if (sched_state_validate_switch()) {
+		TS_SCHED2_END(prev);
+		goto litmus_need_resched_nonpreemptible;
+	}
+
 	sched_preempt_enable_no_resched();
 
 	TS_SCHED2_END(prev);
-- 
cgit v1.2.2
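
Note on the mechanism: the hunks above gate the tail of __schedule() and schedule_tail() on sched_state_validate_switch(). If the scheduling decision went stale while the switch was being prepared (e.g., a racing remote request), the CPU loops back to pick again, or requests a local reschedule via litmus_reschedule_local(), instead of continuing with the wrong task. The user-space sketch below models that idea with a single per-CPU state variable. It is a minimal illustration only: the state names and the helpers entered_schedule(), picked_task(), request_resched(), and validate_switch() are assumptions for this sketch, not the actual LITMUS^RT litmus/preempt.c implementation.

/*
 * Hypothetical, simplified model of a per-CPU preemption state machine
 * of the kind the patch hooks into __schedule() and schedule_tail().
 * Illustrative only; not the LITMUS^RT kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

enum sched_state {
	TASK_SCHEDULED,     /* CPU is running the task it picked           */
	WILL_SCHEDULE,      /* CPU has entered __schedule()                 */
	TASK_PICKED,        /* a task was picked, switch not yet completed  */
	PICKED_WRONG_TASK,  /* the pick was invalidated before the switch   */
};

static _Atomic enum sched_state cpu_state = TASK_SCHEDULED;

/* Analogous in spirit to sched_state_entered_schedule(): we will pick soon. */
static void entered_schedule(void)
{
	atomic_store(&cpu_state, WILL_SCHEDULE);
}

/* The scheduler core has committed to a task. */
static void picked_task(void)
{
	enum sched_state expected = WILL_SCHEDULE;

	/* If someone raced in and invalidated the pick, the CAS fails and
	 * the state stays PICKED_WRONG_TASK, forcing revalidation below. */
	atomic_compare_exchange_strong(&cpu_state, &expected, TASK_PICKED);
}

/* A remote CPU (or interrupt handler) invalidates the pending decision. */
static void request_resched(void)
{
	atomic_store(&cpu_state, PICKED_WRONG_TASK);
}

/*
 * Analogous in spirit to the sched_state_validate_switch() check added
 * after post_schedule(): returns nonzero when the pick is stale and the
 * caller should loop back (the goto litmus_need_resched_nonpreemptible
 * path in the patch).
 */
static int validate_switch(void)
{
	enum sched_state expected = TASK_PICKED;

	return !atomic_compare_exchange_strong(&cpu_state, &expected,
					       TASK_SCHEDULED);
}

int main(void)
{
	int pass = 0;

	do {
		entered_schedule();        /* top of __schedule()        */
		picked_task();             /* pick_next_task() finished  */
		if (pass == 0)
			request_resched(); /* simulate a racing request  */
		pass++;
	} while (validate_switch());       /* stale pick? try again      */

	printf("scheduling decision settled after %d pass(es)\n", pass);
	return 0;
}

In this toy run, the first pass is invalidated by the simulated remote request and the loop picks again, mirroring why the patch re-runs the scheduling path rather than returning with a decision that is no longer valid.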