author    Bjoern Brandenburg <bbb@mpi-sws.org>  2013-06-25 01:30:56 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2013-08-07 03:46:51 -0400
commit    15a3dd60f0dc56db35d3335e7ea0ea8ab67eedfb
tree      f34e044fe45e8832891486c822c3eae4bcfd0ca5 /kernel
parent    8f88280b7201efb67751b904728d7c8ed9786f93
Integrate preemption state machine with Linux scheduler
Track when a processor is going to schedule "soon".
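
The patch wires two hooks of the LITMUS^RT preemption state machine into the stock scheduler: sched_state_entered_schedule() marks the point where the local CPU commits to a scheduling pass, and sched_state_validate_switch() checks afterwards whether a preemption request arrived in the meantime, i.e., whether the decision just made is already stale. As a rough illustration of that contract, here is a minimal user-space C model; the state names, the single-CPU encoding, and the request_preemption() helper are assumptions made for the sketch, not code from this patch.

	/* Minimal model of the preemption state machine (illustrative only). */
	#include <stdatomic.h>
	#include <stdio.h>

	enum sched_state {
		TASK_SCHEDULED,		/* a task runs, no preemption pending    */
		SHOULD_SCHEDULE,	/* a preemption was requested ("soon")   */
		WILL_SCHEDULE,		/* the CPU has entered the scheduler     */
		TASK_PICKED,		/* the scheduling decision was validated */
	};

	static _Atomic enum sched_state cpu_state = TASK_SCHEDULED;

	/* Hook 1: called at the top of __schedule(). */
	static void sched_state_entered_schedule(void)
	{
		atomic_store(&cpu_state, WILL_SCHEDULE);
	}

	/* A remote CPU or interrupt asks for a preemption (assumed helper). */
	static void request_preemption(void)
	{
		atomic_store(&cpu_state, SHOULD_SCHEDULE);
	}

	/* Hook 2: called after the pick; nonzero means "schedule again". */
	static int sched_state_validate_switch(void)
	{
		enum sched_state expected = WILL_SCHEDULE;

		return !atomic_compare_exchange_strong(&cpu_state, &expected,
						       TASK_PICKED);
	}

	int main(void)
	{
		sched_state_entered_schedule();
		request_preemption();	/* races with the scheduling pass */
		if (sched_state_validate_switch())
			puts("stale decision: must schedule again");
		return 0;
	}
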
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a51ce671fd2..b073ffda2bf2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1960,8 +1960,12 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq;
+
 
+	preempt_disable();
+
+	rq = this_rq();
 	finish_task_switch(rq, prev);
 
 	/*
@@ -1970,6 +1974,11 @@ asmlinkage void schedule_tail(struct task_struct *prev)
 	 */
 	post_schedule(rq);
 
+	if (sched_state_validate_switch())
+		litmus_reschedule_local();
+
+	preempt_enable();
+
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
 	preempt_enable();
@@ -2967,11 +2976,16 @@ static void __sched __schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+	/* LITMUS^RT: quickly re-evaluate the scheduling decision
+	 * if the previous one is no longer valid after context switch.
+	 */
+litmus_need_resched_nonpreemptible:
 	TS_SCHED_START;
 
 	schedule_debug(prev);
@@ -3041,6 +3055,11 @@ need_resched:
 
 	post_schedule(rq);
 
+	if (sched_state_validate_switch()) {
+		TS_SCHED2_END(prev);
+		goto litmus_need_resched_nonpreemptible;
+	}
+
 	sched_preempt_enable_no_resched();
 
 	TS_SCHED2_END(prev);
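
litmus_reschedule_local() is referenced but not defined in this diff. schedule_tail() cannot jump back the way __schedule() does via its litmus_need_resched_nonpreemptible label, so a stale decision there is handled by re-arming the local CPU instead. Extending the user-space model above (again an assumption, not code from this patch), the call would amount to falling back to the "preemption pending" state; in the kernel the analogous effect is set_tsk_need_resched(current), so that the preempt_enable() a few lines later immediately enters a fresh scheduling pass.

	/* Sketch only: litmus_reschedule_local() in terms of the model above. */
	static void litmus_reschedule_local(void)
	{
		/* fall back to "a preemption is pending" ...                 */
		atomic_store(&cpu_state, SHOULD_SCHEDULE);
		/* ... which stands in for set_tsk_need_resched(current): the */
		/* following preempt_enable() then reschedules right away.    */
	}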