 arch/arm/kernel/smp.c |  4 ++++
 arch/x86/kernel/smp.c |  6 ++++++
 include/linux/sched.h |  2 ++
 kernel/sched/core.c   | 21 ++++++++++++++++++++-
 4 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..1a945e27d310 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -46,6 +46,8 @@
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
 
+#include <litmus/preempt.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -617,6 +619,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 #endif
 
 	case IPI_RESCHEDULE:
+		/* LITMUS^RT: take action based on scheduler state */
+		sched_state_ipi();
 		scheduler_ipi();
 		break;
 
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index a52ef7fd6862..becf5c332d19 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -24,6 +24,7 @@
 #include <linux/cpu.h>
 #include <linux/gfp.h>
 
+#include <litmus/preempt.h>
 #include <litmus/debug_trace.h>
 
 #include <asm/mtrr.h>
@@ -269,6 +270,11 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
+
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine.
+	 * Starting from 3.0, scheduler_ipi() actually does something. This may
+	 * increase IPI latencies compared with previous versions. */
+	sched_state_ipi();
 }
 
 void smp_call_function_interrupt(struct pt_regs *regs)
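Both hunks above add the same hook: alongside the stock scheduler_ipi() work, the LITMUS^RT preemption state machine is told that a reschedule IPI arrived on this CPU. The following is only a compilable user-space illustration of that idea, not the litmus/preempt.h implementation behind sched_state_ipi(); every name in it (enum sched_state, request_resched(), on_resched_ipi()) is invented for the example.

/*
 * Illustrative model only -- not the litmus/preempt.h implementation.
 * A per-CPU state word records whether a remote reschedule request has
 * been acknowledged by the CPU that received the IPI.
 */
#include <stdatomic.h>
#include <stdio.h>

enum sched_state {
	TASK_SCHEDULED,		/* current scheduling decision believed valid */
	SHOULD_SCHEDULE,	/* a remote CPU asked for a new decision */
	WILL_SCHEDULE		/* local CPU acknowledged; it will reschedule */
};

static _Atomic int cpu_state = TASK_SCHEDULED;

/* Remote side: flag the request, then (in the kernel) send the IPI. */
static void request_resched(void)
{
	atomic_store(&cpu_state, SHOULD_SCHEDULE);
	/* smp_send_reschedule(cpu) would follow here in kernel context */
}

/* Local side: roughly what a hook in the reschedule-IPI handler does. */
static void on_resched_ipi(void)
{
	int expected = SHOULD_SCHEDULE;

	/* Acknowledge only if a request is actually pending. */
	atomic_compare_exchange_strong(&cpu_state, &expected, WILL_SCHEDULE);
}

int main(void)
{
	request_resched();
	on_resched_ipi();
	printf("state after IPI: %d (2 == WILL_SCHEDULE)\n",
	       atomic_load(&cpu_state));
	return 0;
}

In the patch itself the acknowledgement simply runs inside each architecture's reschedule-IPI handler, next to the existing scheduler_ipi() call.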
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 706299238695..767816b6ca5a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -56,6 +56,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 #include <litmus/rt_param.h>
+#include <litmus/preempt.h>
 
 struct exec_domain;
 struct futex_pi_state;
@@ -2371,6 +2372,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
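The sched.h hunk makes every set_tsk_need_resched() call also notify the plugin state machine via sched_state_will_schedule(tsk). A compilable user-space model of what such a notification amounts to is sketched below; it is not the litmus/preempt.h code, and struct task, cpu_will_schedule[] and set_task_need_resched() are invented for the illustration.

/*
 * Illustrative model, not the litmus/preempt.h code: setting the
 * "need_resched" flag for a task that is currently running also records,
 * for that task's CPU, that the CPU is about to make a new scheduling
 * decision, so an in-flight decision can later be recognised as stale.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct task { int on_cpu; bool need_resched; };	/* on_cpu == -1: not running */

static bool cpu_will_schedule[NR_CPUS];

/* Models the combined set_tsk_need_resched() + sched_state_will_schedule(). */
static void set_task_need_resched(struct task *t)
{
	t->need_resched = true;			/* TIF_NEED_RESCHED */
	if (t->on_cpu >= 0)			/* only a running task pins a CPU */
		cpu_will_schedule[t->on_cpu] = true;
}

int main(void)
{
	struct task t = { .on_cpu = 2 };

	set_task_need_resched(&t);
	printf("cpu 2 will schedule: %d\n", cpu_will_schedule[2]);
	return 0;
}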
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a51ce671fd2..b073ffda2bf2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1960,8 +1960,12 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq;
+
 
+	preempt_disable();
+
+	rq = this_rq();
 	finish_task_switch(rq, prev);
 
 	/*
@@ -1970,6 +1974,11 @@ asmlinkage void schedule_tail(struct task_struct *prev)
 	 */
 	post_schedule(rq);
 
+	if (sched_state_validate_switch())
+		litmus_reschedule_local();
+
+	preempt_enable();
+
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
 	preempt_enable();
@@ -2967,11 +2976,16 @@ static void __sched __schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+	/* LITMUS^RT: quickly re-evaluate the scheduling decision
+	 * if the previous one is no longer valid after context switch.
+	 */
+litmus_need_resched_nonpreemptible:
 	TS_SCHED_START;
 
 	schedule_debug(prev);
@@ -3041,6 +3055,11 @@ need_resched:
 
 	post_schedule(rq);
 
+	if (sched_state_validate_switch()) {
+		TS_SCHED2_END(prev);
+		goto litmus_need_resched_nonpreemptible;
+	}
+
 	sched_preempt_enable_no_resched();
 
 	TS_SCHED2_END(prev);
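In kernel/sched/core.c the pattern is the same in both paths: once the next task has been picked and the run queue work is done, sched_state_validate_switch() asks the state machine whether the decision went stale in the meantime. __schedule() loops back to the new litmus_need_resched_nonpreemptible label and picks again, while schedule_tail() (the first return path of a freshly forked task) requests another pass via litmus_reschedule_local(), with preempt_disable()/preempt_enable() keeping that request on the CPU whose state was just checked. The following compilable model compresses the retry pattern; it is an illustration, not the scheduler code, and every name in it is invented.

/*
 * Illustrative model only -- a compressed view of the retry pattern the
 * __schedule() hunk adds: pick a task, then re-check the per-CPU state;
 * if a remote CPU invalidated the decision in the meantime, pick again
 * instead of returning with a stale choice.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool decision_invalidated;

static int pick_next(void)
{
	static int pick;

	return ++pick;			/* stand-in for the real task pick */
}

/* Models sched_state_validate_switch(): true means "decision is stale". */
static bool validate_switch(void)
{
	return atomic_exchange(&decision_invalidated, false);
}

static int schedule_model(void)
{
	int picked;

	do {
		picked = pick_next();
		/* a remote CPU may invalidate the decision right here */
	} while (validate_switch());

	return picked;
}

int main(void)
{
	/* Simulate a request that arrives while the first pick is in flight. */
	atomic_store(&decision_invalidated, true);
	printf("finally picked task %d\n", schedule_model());
	return 0;
}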