From 15a3dd60f0dc56db35d3335e7ea0ea8ab67eedfb Mon Sep 17 00:00:00 2001
From: Bjoern Brandenburg
Date: Tue, 25 Jun 2013 07:30:56 +0200
Subject: Integrate preemption state machine with Linux scheduler

Track when a processor is going to schedule "soon".
---
 arch/arm/kernel/smp.c |  4 ++++
 arch/x86/kernel/smp.c |  6 ++++++
 include/linux/sched.h |  2 ++
 kernel/sched/core.c   | 21 ++++++++++++++++++++-
 4 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..1a945e27d310 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -46,6 +46,8 @@
 #include
 #include
 
+#include
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -617,6 +619,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 #endif
 
 	case IPI_RESCHEDULE:
+		/* LITMUS^RT: take action based on scheduler state */
+		sched_state_ipi();
 		scheduler_ipi();
 		break;
 
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index a52ef7fd6862..becf5c332d19 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -24,6 +24,7 @@
 #include
 #include
 
+#include
 #include
 #include
 
@@ -269,6 +270,11 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
+
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine.
+	 * Starting from 3.0 schedule_ipi() actually does something. This may
+	 * increase IPI latencies compared with previous versions. */
+	sched_state_ipi();
 }
 
 void smp_call_function_interrupt(struct pt_regs *regs)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 706299238695..767816b6ca5a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -56,6 +56,7 @@ struct sched_param {
 #include
 #include
 
+#include
 
 struct exec_domain;
 struct futex_pi_state;
@@ -2371,6 +2372,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a51ce671fd2..b073ffda2bf2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1960,8 +1960,12 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq;
+
+	preempt_disable();
+
+	rq = this_rq();
 
 	finish_task_switch(rq, prev);
 
 	/*
@@ -1970,6 +1974,11 @@ asmlinkage void schedule_tail(struct task_struct *prev)
 	 */
 	post_schedule(rq);
 
+	if (sched_state_validate_switch())
+		litmus_reschedule_local();
+
+	preempt_enable();
+
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
 	preempt_enable();
@@ -2967,11 +2976,16 @@ static void __sched __schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+	/* LITMUS^RT: quickly re-evaluate the scheduling decision
+	 * if the previous one is no longer valid after context switch.
+	 */
+litmus_need_resched_nonpreemptible:
 	TS_SCHED_START;
 	schedule_debug(prev);
 
@@ -3041,6 +3055,11 @@ need_resched:
 
 	post_schedule(rq);
 
+	if (sched_state_validate_switch()) {
+		TS_SCHED2_END(prev);
+		goto litmus_need_resched_nonpreemptible;
+	}
+
 	sched_preempt_enable_no_resched();
 
 	TS_SCHED2_END(prev);
-- 
cgit v1.2.2
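
The hooks wired in above (sched_state_will_schedule(), sched_state_ipi(), sched_state_entered_schedule(), sched_state_validate_switch()) belong to the LITMUS^RT preemption state machine declared in a LITMUS^RT header (the include lines whose names were stripped above). The user-space C sketch below only illustrates why __schedule() loops back to litmus_need_resched_nonpreemptible when validation fails: a reschedule request that arrives while a task is being picked invalidates the pick. The state names, the simplified void signatures, and the single-CPU simulation are assumptions made for this example; they are not the actual LITMUS^RT implementation.

/* Minimal sketch of a preemption state machine, assuming illustrative
 * state names and a single simulated CPU.  Compile with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

enum sched_state {
	TASK_SCHEDULED,		/* current scheduling decision is valid        */
	SHOULD_SCHEDULE,	/* a reschedule IPI arrived on a settled CPU   */
	WILL_SCHEDULE,		/* need_resched set; schedule() will run soon  */
	TASK_PICKED,		/* schedule() picked a task, not yet validated */
};

static _Atomic enum sched_state cpu_state = TASK_SCHEDULED;

/* Counterpart of the hook in set_tsk_need_resched(): a reschedule is coming. */
static void sched_state_will_schedule(void)
{
	atomic_store(&cpu_state, WILL_SCHEDULE);
}

/* Counterpart of the hook in the arm/x86 reschedule IPI handlers. */
static void sched_state_ipi(void)
{
	enum sched_state expected = TASK_SCHEDULED;

	/* Only record the request if nothing stronger is pending already. */
	atomic_compare_exchange_strong(&cpu_state, &expected, SHOULD_SCHEDULE);
}

/* Counterpart of the hook at the top of __schedule(). */
static void sched_state_entered_schedule(void)
{
	atomic_store(&cpu_state, TASK_PICKED);
}

/* Counterpart of the hook after the context switch: nonzero means the pick
 * was invalidated concurrently and scheduling must be redone. */
static int sched_state_validate_switch(void)
{
	enum sched_state expected = TASK_PICKED;

	if (atomic_compare_exchange_strong(&cpu_state, &expected,
					   TASK_SCHEDULED))
		return 0;	/* pick committed, switch is valid */
	return 1;		/* re-run scheduling (the goto path above) */
}

int main(void)
{
	/* Mirror the control flow the patch adds to __schedule(). */
	do {
		sched_state_entered_schedule();
		/* ...pick_next_task() and context_switch() would run here;
		 * simulate a concurrent reschedule request on the first pass. */
		static int first = 1;
		if (first) {
			first = 0;
			sched_state_will_schedule();
		}
	} while (sched_state_validate_switch());

	printf("final state: %d (TASK_SCHEDULED)\n", atomic_load(&cpu_state));
	sched_state_ipi();	/* a late IPI on a settled CPU */
	printf("after IPI:   %d (SHOULD_SCHEDULE)\n", atomic_load(&cpu_state));
	return 0;
}

In the sketch, the first pass through the loop is invalidated and retried exactly like the patched __schedule() retries via the litmus_need_resched_nonpreemptible label; the real kernel hooks take the task pointer and per-CPU state into account, which is omitted here.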