Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile         |   1
-rw-r--r--  litmus/budget.c         |   3
-rw-r--r--  litmus/preempt.c        | 131
-rw-r--r--  litmus/sched_cedf.c     |   4
-rw-r--r--  litmus/sched_gsn_edf.c  |  19
-rw-r--r--  litmus/sched_litmus.c   |   5
-rw-r--r--  litmus/sched_pfair.c    |  11
-rw-r--r--  litmus/sched_plugin.c   |  45
-rw-r--r--  litmus/sched_psn_edf.c  |   4
9 files changed, 173 insertions(+), 50 deletions(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 7bd1abdcb84a..b7366b530749 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,6 +3,7 @@
 #
 
 obj-y     = sched_plugin.o litmus.o \
+	    preempt.o \
 	    litmus_proc.o \
 	    budget.o \
 	    jobs.o \
diff --git a/litmus/budget.c b/litmus/budget.c
index f6f5ca81c9d6..310e9a3d4172 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -3,6 +3,7 @@
 #include <linux/hrtimer.h>
 
 #include <litmus/litmus.h>
+#include <litmus/preempt.h>
 
 struct enforcement_timer {
 	/* The enforcement timer is used to accurately police
@@ -24,7 +25,7 @@ static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
 	TRACE("enforcement timer fired.\n");
 	et->armed = 0;
 	/* activate scheduler */
-	set_tsk_need_resched(current);
+	litmus_reschedule_local();
 	local_irq_restore(flags);
 
 	return HRTIMER_NORESTART;
diff --git a/litmus/preempt.c b/litmus/preempt.c
new file mode 100644
index 000000000000..ebe2e3461895
--- /dev/null
+++ b/litmus/preempt.c
@@ -0,0 +1,131 @@
+#include <linux/sched.h>
+
+#include <litmus/litmus.h>
+#include <litmus/preempt.h>
+
+/* The rescheduling state of each processor.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
+
+void sched_state_will_schedule(struct task_struct* tsk)
+{
+	/* Litmus hack: we only care about processor-local invocations of
+	 * set_tsk_need_resched(). We can't reliably set the flag remotely
+	 * since it might race with other updates to the scheduling state. We
+	 * can't rely on the runqueue lock protecting updates to the sched
+	 * state since processors do not acquire the runqueue locks for all
+	 * updates to the sched state (to avoid acquiring two runqueue locks at
+	 * the same time). Further, if tsk is residing on a remote processor,
+	 * then that processor doesn't actually know yet that it is going to
+	 * reschedule; it still must receive an IPI (unless a local invocation
+	 * races).
+	 */
+	if (likely(task_cpu(tsk) == smp_processor_id())) {
+		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
+		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
+			set_sched_state(PICKED_WRONG_TASK);
+		else
+			set_sched_state(WILL_SCHEDULE);
+	} else
+		/* Litmus tasks should never be subject to a remote
+		 * set_tsk_need_resched(). */
+		BUG_ON(is_realtime(tsk));
+	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
+		   __builtin_return_address(0));
+}
+
+/* Called by the IPI handler after another CPU called smp_send_reschedule(). */
+void sched_state_ipi(void)
+{
+	/* If the IPI was slow, we might be in any state right now. The IPI is
+	 * only meaningful if we are in SHOULD_SCHEDULE. */
+	if (is_in_sched_state(SHOULD_SCHEDULE)) {
+		/* Cause scheduler to be invoked.
+		 * This will cause a transition to WILL_SCHEDULE. */
+		set_tsk_need_resched(current);
+		TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n",
+			    current->comm, current->pid);
+	} else {
+		/* ignore */
+		TRACE_STATE("ignoring IPI in state %x (%s)\n",
+			    get_sched_state(),
+			    sched_state_name(get_sched_state()));
+	}
+}
+
+/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must
+ * hold the lock that is used to serialize scheduling decisions. */
+void litmus_reschedule(int cpu)
+{
+	int picked_transition_ok = 0;
+	int scheduled_transition_ok = 0;
+
+	/* The (remote) CPU could be in any state. */
+
+	/* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU
+	 * is not aware of the need to reschedule at this point. */
+
+	/* is a context switch in progress? */
+	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
+		picked_transition_ok = sched_state_transition_on(
+			cpu, TASK_PICKED, PICKED_WRONG_TASK);
+
+	if (!picked_transition_ok &&
+	    cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
+		/* We either raced with the end of the context switch, or the
+		 * CPU was in TASK_SCHEDULED anyway. */
+		scheduled_transition_ok = sched_state_transition_on(
+			cpu, TASK_SCHEDULED, SHOULD_SCHEDULE);
+	}
+
+	/* If the CPU was in state TASK_SCHEDULED, then we need to cause the
+	 * scheduler to be invoked. */
+	if (scheduled_transition_ok) {
+		if (smp_processor_id() == cpu)
+			set_tsk_need_resched(current);
+		else
+			smp_send_reschedule(cpu);
+	}
+
+	TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
+		    __FUNCTION__,
+		    picked_transition_ok,
+		    scheduled_transition_ok);
+}
+
+void litmus_reschedule_local(void)
+{
+	if (is_in_sched_state(TASK_PICKED))
+		set_sched_state(PICKED_WRONG_TASK);
+	else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
+		set_sched_state(WILL_SCHEDULE);
+		set_tsk_need_resched(current);
+	}
+}
+
+#ifdef CONFIG_DEBUG_KERNEL
+
+void sched_state_plugin_check(void)
+{
+	if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) {
+		TRACE("!!!! plugin did not call sched_state_task_picked()! "
+		      "Calling sched_state_task_picked() is mandatory---fix this.\n");
+		set_sched_state(TASK_PICKED);
+	}
+}
+
+#define NAME_CHECK(x) case x: return #x
+const char* sched_state_name(int s)
+{
+	switch (s) {
+		NAME_CHECK(TASK_SCHEDULED);
+		NAME_CHECK(SHOULD_SCHEDULE);
+		NAME_CHECK(WILL_SCHEDULE);
+		NAME_CHECK(TASK_PICKED);
+		NAME_CHECK(PICKED_WRONG_TASK);
+	default:
+		return "UNKNOWN";
+	};
+}
+
+#endif
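
Note: litmus/preempt.c builds on helpers such as is_in_sched_state(), set_sched_state(), cpu_is_in_sched_state(), and sched_state_transition_on(), which are declared in include/litmus/preempt.h and are not part of this diffstat. The stand-alone C sketch below is only an illustration of the state machine those helpers implement; the state names come from the file above, while the bit values and the helper shown are assumptions. Each CPU keeps one atomic state word, and a transition is attempted with a compare-and-swap so that a racing request is detected rather than silently lost.

/* Illustrative user-space model of the per-CPU scheduling-state machine.
 * The state names mirror litmus/preempt.c; the bit values and the helper
 * below are assumptions, not the contents of include/litmus/preempt.h. */
#include <stdatomic.h>
#include <stdio.h>

enum sched_state {
	TASK_SCHEDULED    = (1 << 0), /* stable: no reschedule requested    */
	SHOULD_SCHEDULE   = (1 << 1), /* remote request; IPI not yet seen   */
	WILL_SCHEDULE     = (1 << 2), /* this CPU will call schedule() soon */
	TASK_PICKED       = (1 << 3), /* plugin has picked the next task    */
	PICKED_WRONG_TASK = (1 << 4), /* pick was invalidated; pick again   */
};

/* Stand-in for one CPU's entry in the per-CPU resched_state array. */
static _Atomic int resched_state = TASK_SCHEDULED;

/* Analogue of sched_state_transition_on(): the transition succeeds only if
 * the CPU is still in 'from'; a lost race leaves the state untouched. */
static int sched_state_transition(int from, int to)
{
	int expected = from;
	return atomic_compare_exchange_strong(&resched_state, &expected, to);
}

int main(void)
{
	/* Remote litmus_reschedule(): TASK_SCHEDULED -> SHOULD_SCHEDULE,
	 * followed by an IPI to the target CPU. */
	if (sched_state_transition(TASK_SCHEDULED, SHOULD_SCHEDULE))
		printf("would send reschedule IPI\n");

	/* sched_state_ipi() only acts if the CPU is still in SHOULD_SCHEDULE;
	 * the eventual transition to WILL_SCHEDULE is modeled directly here. */
	if (sched_state_transition(SHOULD_SCHEDULE, WILL_SCHEDULE))
		printf("scheduler will be invoked\n");

	/* A request that arrives after the plugin picked a task flips
	 * TASK_PICKED to PICKED_WRONG_TASK, forcing another pick. */
	atomic_store(&resched_state, TASK_PICKED);
	if (sched_state_transition(TASK_PICKED, PICKED_WRONG_TASK))
		printf("pick invalidated; schedule() must run again\n");

	return 0;
}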
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index a729d97535e9..615560f21d60 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -34,6 +34,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
@@ -341,7 +342,7 @@ static void cedf_tick(struct task_struct* t)
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
-			set_tsk_need_resched(t);
+			litmus_reschedule_local();
 			set_will_schedule();
 			TRACE("cedf_scheduler_tick: "
 			      "%d is preemptable "
@@ -466,6 +467,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	sched_state_task_picked();
 	raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e101768740ad..f0337cfd8631 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -18,6 +18,8 @@
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
+#include <litmus/preempt.h>
+
 #include <litmus/bheap.h>
 
 #include <linux/module.h>
@@ -95,21 +97,12 @@ typedef struct {
 	int 			cpu;
 	struct task_struct*	linked;    /* only RT tasks */
 	struct task_struct*	scheduled; /* only RT tasks */
-	atomic_t		will_schedule; /* prevent unneeded IPIs */
 	struct bheap_node*	hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
 cpu_entry_t* gsnedf_cpus[NR_CPUS];
 
-#define set_will_schedule() \
-	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1))
-#define clear_will_schedule() \
-	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 0))
-#define test_will_schedule(cpu) \
-	(atomic_read(&per_cpu(gsnedf_cpu_entries, cpu).will_schedule))
-
-
 /* the cpus queue themselves according to priority in here */
 static struct bheap_node gsnedf_heap_node[NR_CPUS];
 static struct bheap      gsnedf_cpu_heap;
@@ -341,8 +334,7 @@ static void gsnedf_tick(struct task_struct* t)
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
-			set_tsk_need_resched(t);
-			set_will_schedule();
+			litmus_reschedule_local();
 			TRACE("gsnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
@@ -391,7 +383,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 #endif
 
 	raw_spin_lock(&gsnedf_lock);
-	clear_will_schedule();
 
 	/* sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -473,6 +464,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	sched_state_task_picked();
+
 	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -780,7 +773,6 @@ static long gsnedf_activate_plugin(void)
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		bheap_node_init(&entry->hn, entry);
-		atomic_set(&entry->will_schedule, 0);
 		entry->linked    = NULL;
 		entry->scheduled = NULL;
 #ifdef CONFIG_RELEASE_MASTER
@@ -829,7 +821,6 @@ static int __init init_gsn_edf(void)
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		gsnedf_cpus[cpu] = entry;
-		atomic_set(&entry->will_schedule, 0);
 		entry->cpu 	 = cpu;
 		entry->hn        = &gsnedf_heap_node[cpu];
 		bheap_node_init(&entry->hn, entry);
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 65873152e68f..e6952896dc4b 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -3,6 +3,7 @@
 #include <litmus/litmus.h>
 #include <litmus/budget.h>
 #include <litmus/sched_plugin.h>
+#include <litmus/preempt.h>
 
 static void update_time_litmus(struct rq *rq, struct task_struct *p)
 {
@@ -52,6 +53,8 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	/* let the plugin schedule */
 	next = litmus->schedule(prev);
 
+	sched_state_plugin_check();
+
 	/* check if a global plugin pulled a task from a different RQ */
 	if (next && task_rq(next) != rq) {
 		/* we need to migrate the task */
@@ -198,7 +201,7 @@ static void yield_task_litmus(struct rq *rq)
 	 * then determine if a preemption is still required.
 	 */
 	clear_exit_np(current);
-	set_tsk_need_resched(current);
+	litmus_reschedule_local();
 }
 
 /* Plugins are responsible for this.
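
The sched_state_plugin_check() call added after litmus->schedule() makes the new contract explicit: every plugin's schedule() callback must invoke sched_state_task_picked() before dropping the lock that serializes its scheduling decisions, which is exactly what the sched_cedf.c, sched_gsn_edf.c, sched_pfair.c, and sched_psn_edf.c hunks add. A minimal sketch of that calling convention follows; my_plugin_schedule(), my_plugin_lock, and pick_next_job() are made-up names, not part of this patch.

/* Hypothetical plugin skeleton illustrating the calling convention only;
 * my_plugin_schedule(), my_plugin_lock, and pick_next_job() are invented. */
static struct task_struct* my_plugin_schedule(struct task_struct *prev)
{
	struct task_struct *next;

	raw_spin_lock(&my_plugin_lock);
	/* plugin-specific selection of the next job */
	next = pick_next_job(prev);
	/* publish TASK_PICKED before the serializing lock is dropped, so a
	 * concurrent litmus_reschedule() becomes PICKED_WRONG_TASK instead
	 * of being lost; otherwise sched_state_plugin_check() complains */
	sched_state_task_picked();
	raw_spin_unlock(&my_plugin_lock);

	return next;
}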
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index ea77d3295290..c7d5cf7aa2b3 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -16,6 +16,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/rt_domain.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/sched_trace.h>
@@ -241,11 +242,7 @@ static void check_preempt(struct task_struct* t)
 		PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n",
 			   tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on);
 		/* preempt */
-		if (cpu == smp_processor_id())
-			set_tsk_need_resched(current);
-		else {
-			smp_send_reschedule(cpu);
-		}
+		litmus_reschedule(cpu);
 	}
 }
 
@@ -545,7 +542,7 @@ static void pfair_tick(struct task_struct* t)
 
 	if (state->local != current
 	    && (is_realtime(current) || is_present(state->local)))
-		set_tsk_need_resched(current);
+		litmus_reschedule_local();
 }
 
 static int safe_to_schedule(struct task_struct* t, int cpu)
@@ -577,7 +574,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		if (next)
 			tsk_rt(next)->scheduled_on = state->cpu;
 	}
-
+	sched_state_task_picked();
 	raw_spin_unlock(&pfair_lock);
 
 	if (next)
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index ec04454a0cf9..d912a6494d20 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -10,7 +10,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
-
+#include <litmus/preempt.h>
 #include <litmus/jobs.h>
 
 /*
@@ -19,36 +19,30 @@
  * non-preemptive section aware and does not invoke the scheduler / send
  * IPIs if the to-be-preempted task is actually non-preemptive.
  */
-void preempt_if_preemptable(struct task_struct* t, int on_cpu)
+void preempt_if_preemptable(struct task_struct* t, int cpu)
 {
 	/* t is the real-time task executing on CPU on_cpu If t is NULL, then
 	 * on_cpu is currently scheduling background work.
 	 */
 
-	int send_ipi;
+	int reschedule = 0;
 
-	if (smp_processor_id() == on_cpu) {
-		/* local CPU case */
-		if (t) {
+	if (!t)
+		/* move non-real-time task out of the way */
+		reschedule = 1;
+	else {
+		if (smp_processor_id() == cpu) {
+			/* local CPU case */
 			/* check if we need to poke userspace */
 			if (is_user_np(t))
 				/* yes, poke it */
 				request_exit_np(t);
-			else
-				/* no, see if we are allowed to preempt the
+			else if (!is_kernel_np(t))
+				/* only if we are allowed to preempt the
 				 * currently-executing task */
-				if (!is_kernel_np(t))
-					set_tsk_need_resched(t);
-		} else
-			/* move non-real-time task out of the way */
-			set_tsk_need_resched(current);
-	} else {
-		/* remote CPU case */
-		if (!t)
-			/* currently schedules non-real-time work */
-			send_ipi = 1;
-		else {
-			/* currently schedules real-time work */
+				reschedule = 1;
+		} else {
+			/* remote CPU case */
 			if (is_user_np(t)) {
 				/* need to notify user space of delayed
 				 * preemption */
@@ -60,14 +54,14 @@ void preempt_if_preemptable(struct task_struct* t, int on_cpu)
 				mb();
 			}
 			/* Only send an ipi if remote task might have raced our
-			 * request, i.e., send an IPI to make sure if it exited
-			 * its critical section.
+			 * request, i.e., send an IPI to make sure in case it
+			 * exited its critical section.
 			 */
-			send_ipi = !is_np(t) && !is_kernel_np(t);
+			reschedule = !is_np(t) && !is_kernel_np(t);
 		}
-		if (likely(send_ipi))
-			smp_send_reschedule(on_cpu);
 	}
+	if (likely(reschedule))
+		litmus_reschedule(cpu);
 }
 
 
@@ -81,6 +75,7 @@ static void litmus_dummy_finish_switch(struct task_struct * prev)
 
 static struct task_struct* litmus_dummy_schedule(struct task_struct * prev)
 {
+	sched_state_task_picked();
 	return NULL;
 }
 
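
With preempt_if_preemptable() rewritten to funnel every preemption through litmus_reschedule(), the locking rule stated in litmus/preempt.c applies to its callers as well: the lock that serializes scheduling decisions must be held so the TASK_PICKED to PICKED_WRONG_TASK transition cannot race with a concurrent pick. A hedged sketch of such a caller follows; my_plugin_job_release(), my_domain_lock, my_cpu_entry, and higher_prio() are illustrative names only, not part of this patch.

/* Hypothetical release path; all identifiers except preempt_if_preemptable()
 * and the lock/unlock primitives are invented for illustration. */
static void my_plugin_job_release(struct task_struct *t)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_domain_lock, flags);
	if (higher_prio(t, my_cpu_entry.scheduled))
		/* honors non-preemptive sections and chooses between a
		 * local reschedule and an IPI via litmus_reschedule() */
		preempt_if_preemptable(my_cpu_entry.scheduled, my_cpu_entry.cpu);
	raw_spin_unlock_irqrestore(&my_domain_lock, flags);
}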
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index aa567f2b91b9..b89823d5c026 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -16,6 +16,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
@@ -108,7 +109,7 @@ static void psnedf_tick(struct task_struct *t)
 
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
-			set_tsk_need_resched(t);
+			litmus_reschedule_local();
 			TRACE("psnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
@@ -204,6 +205,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}
 
 	pedf->scheduled = next;
+	sched_state_task_picked();
 	raw_spin_unlock(&pedf->slock);
 
 	return next;