author    Bjoern Brandenburg <bbb@mpi-sws.org>  2015-08-09 07:18:49 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2017-05-26 17:12:28 -0400
commit    f77cd98179f8758cb9b3bbb0bccbf576a6a79cb9 (patch)
tree      a312ce3b9e311e02ae8efe09e58548b9d0124c4e
parent    3baa55c19ffb567aa48568fa69dd17ad6f70d31d (diff)
Integrate preemption state machine with Linux scheduler
Track when a processor is going to schedule "soon".
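
The state machine itself is declared in litmus/preempt.h. For orientation, here is a sketch of the per-CPU states it cycles through (state names as used by the hooks in this patch; comments paraphrased, so treat this as illustrative rather than authoritative):

typedef enum scheduling_state {
	TASK_SCHEDULED    = (1 << 0), /* the plugin's chosen task is running */
	SHOULD_SCHEDULE   = (1 << 1), /* a remote CPU decided this CPU must
	                               * reschedule; the IPI may still be
	                               * in flight */
	WILL_SCHEDULE     = (1 << 2), /* this CPU noticed the request and
	                               * will enter schedule() shortly */
	TASK_PICKED       = (1 << 3), /* schedule() picked the next task */
	PICKED_WRONG_TASK = (1 << 4), /* the pick was invalidated before the
	                               * switch completed */
} sched_state_t;

The hooks added below drive these transitions: sched_state_ipi() on reschedule IPIs, sched_state_will_schedule() when TIF_NEED_RESCHED is set, sched_state_entered_schedule() on entry to __schedule(), and sched_state_validate_switch() after a context switch.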
-rw-r--r--  arch/arm/kernel/smp.c   4
-rw-r--r--  arch/x86/kernel/smp.c   9
-rw-r--r--  arch/x86/xen/smp.c      2
-rw-r--r--  include/linux/sched.h   2
-rw-r--r--  kernel/sched/core.c    10
5 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7dd14e8395e6..b89255cf8f24 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -51,6 +51,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+#include <litmus/preempt.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -614,6 +616,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 #endif
 
 	case IPI_RESCHEDULE:
+		/* LITMUS^RT: take action based on scheduler state */
+		sched_state_ipi();
 		scheduler_ipi();
 		break;
 
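
On ARM, the reschedule IPI now pokes the state machine before scheduler_ipi() runs. A minimal sketch of what sched_state_ipi() amounts to, assuming the states above and LITMUS^RT's is_in_sched_state() helper (the actual implementation lives in litmus/preempt.c):

void sched_state_ipi(void)
{
	/* The IPI tells this CPU that a remote CPU wants it to
	 * reschedule (SHOULD_SCHEDULE). If so, force entry into the
	 * scheduler; the set_tsk_need_resched() hook below then moves
	 * the state machine to WILL_SCHEDULE. */
	if (is_in_sched_state(SHOULD_SCHEDULE))
		set_tsk_need_resched(current);
	/* In any other state the IPI raced with a scheduling pass that
	 * is already in progress, and nothing needs to be done. */
}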
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index c00cb64bc0a1..ec28c740cc55 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -24,6 +24,8 @@
 #include <linux/cpu.h>
 #include <linux/gfp.h>
 
+#include <litmus/preempt.h>
+
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -218,7 +220,7 @@ static void native_stop_other_cpus(int wait)
 	while (num_online_cpus() > 1 && (wait || timeout--))
 		udelay(1);
 	}
-
+
 	/* if the REBOOT_VECTOR didn't work, try with the NMI */
 	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
 		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
@@ -268,6 +270,11 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs)
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
+
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine.
+	 * Starting from 3.0, scheduler_ipi() actually does something. This may
+	 * increase IPI latencies compared with previous versions. */
+	sched_state_ipi();
 }
 
 __visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 137afbbd0590..ef65354b3faa 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -66,6 +66,8 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	inc_irq_stat(irq_resched_count);
 	scheduler_ipi();
 
+	sched_state_ipi();
+
 	return IRQ_HANDLED;
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78566173465b..52d2f5c2c926 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -127,6 +127,7 @@ struct sched_attr {
 };
 
 #include <litmus/rt_param.h>
+#include <litmus/preempt.h>
 
 struct futex_pi_state;
 struct robust_list_head;
@@ -3274,6 +3275,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
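
Hooking set_tsk_need_resched() means every code path that raises TIF_NEED_RESCHED also records that the task's CPU will reschedule "soon". A sketch of the transition, under the same assumptions as above (the real helper also handles the race with an in-progress pick):

void sched_state_will_schedule(struct task_struct *tsk)
{
	/* Only the local CPU can reliably observe its own progress
	 * toward schedule(); remote set_tsk_need_resched() calls are
	 * covered by the IPI path instead. */
	if (likely(task_cpu(tsk) == smp_processor_id()))
		set_sched_state(WILL_SCHEDULE);
}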
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 32bffe174d75..f2f3a3000a1c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2857,6 +2857,10 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	 */
 
 	rq = finish_task_switch(prev);
+
+	if (unlikely(sched_state_validate_switch()))
+		litmus_reschedule_local();
+
 	balance_callback(rq);
 	preempt_enable();
 
@@ -3349,6 +3353,8 @@ static void __sched notrace __schedule(bool preempt)
 
 	TS_SCHED_START;
 
+	sched_state_entered_schedule();
+
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	prev = rq->curr;
@@ -3421,6 +3427,10 @@ static void __sched notrace __schedule(bool preempt)
 	}
 
 	TS_SCHED2_START(prev);
+
+	if (unlikely(sched_state_validate_switch()))
+		litmus_reschedule_local();
+
 	balance_callback(rq);
 	TS_SCHED2_END(prev);
 }
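
The two sched_state_validate_switch() call sites close the race between picking a task and that pick being invalidated: __schedule() marks the pass with sched_state_entered_schedule(), and after the switch completes (both in __schedule() itself and in schedule_tail() for newly forked tasks) the CPU checks whether it ended up in PICKED_WRONG_TASK. A sketch of the check, same assumptions as the earlier snippets:

int sched_state_validate_switch(void)
{
	int invalid = is_in_sched_state(PICKED_WRONG_TASK);

	if (!invalid)
		/* Commit: the picked task is now the scheduled task. */
		set_sched_state(TASK_SCHEDULED);
	/* On failure the caller invokes litmus_reschedule_local(), which
	 * sets TIF_NEED_RESCHED on this CPU so that __schedule() runs
	 * again immediately and picks a fresh task. */
	return invalid;
}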