Diffstat (limited to 'litmus/preempt.c')
-rw-r--r-- | litmus/preempt.c | 137
1 file changed, 137 insertions, 0 deletions
diff --git a/litmus/preempt.c b/litmus/preempt.c
new file mode 100644
index 000000000000..6be2f26728b8
--- /dev/null
+++ b/litmus/preempt.c
@@ -0,0 +1,137 @@
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/preempt.h>
#include <litmus/trace.h>

/* The rescheduling state of each processor. */
DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
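
/* Note: the state constants and helpers used throughout this file
 * (is_in_sched_state(), set_sched_state(), sched_state_transition_on(),
 * VERIFY_SCHED_STATE()) come from litmus/preempt.h, which is not part of
 * this diff. Judging from the transitions below, each CPU's state appears
 * to cycle TASK_SCHEDULED -> SHOULD_SCHEDULE -> WILL_SCHEDULE ->
 * TASK_PICKED -> back to TASK_SCHEDULED, with PICKED_WRONG_TASK flagging
 * a scheduling decision that was invalidated before the context switch
 * completed. */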

void sched_state_will_schedule(struct task_struct* tsk)
{
	/* Litmus hack: we only care about processor-local invocations of
	 * set_tsk_need_resched(). We can't reliably set the flag remotely
	 * since it might race with other updates to the scheduling state. We
	 * can't rely on the runqueue lock protecting updates to the sched
	 * state since processors do not acquire the runqueue locks for all
	 * updates to the sched state (to avoid acquiring two runqueue locks at
	 * the same time). Further, if tsk is residing on a remote processor,
	 * then that processor doesn't actually know yet that it is going to
	 * reschedule; it still must receive an IPI (unless a local invocation
	 * races).
	 */
	if (likely(task_cpu(tsk) == smp_processor_id())) {
		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE |
				   TASK_PICKED | WILL_SCHEDULE);
		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
			set_sched_state(PICKED_WRONG_TASK);
		else
			set_sched_state(WILL_SCHEDULE);
	} else {
		/* Litmus tasks should never be subject to a remote
		 * set_tsk_need_resched(). */
		BUG_ON(is_realtime(tsk));
	}
#ifdef CONFIG_PREEMPT_STATE_TRACE
	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
		   __builtin_return_address(0));
#endif
}

/* Called by the IPI handler after another CPU called smp_send_reschedule(). */
void sched_state_ipi(void)
{
	/* If the IPI was slow, we might be in any state right now. The IPI is
	 * only meaningful if we are in SHOULD_SCHEDULE. */
	if (is_in_sched_state(SHOULD_SCHEDULE)) {
		/* Cause the scheduler to be invoked.
		 * This will cause a transition to WILL_SCHEDULE. */
		set_tsk_need_resched(current);
		TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n",
			    current->comm, current->pid);
		TS_SEND_RESCHED_END;
	} else {
		/* ignore */
		TRACE_STATE("ignoring IPI in state %x (%s)\n",
			    get_sched_state(),
			    sched_state_name(get_sched_state()));
	}
}
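
/* Note: TS_SEND_RESCHED_END above pairs with TS_SEND_RESCHED_START in
 * litmus_reschedule() below; the pair presumably timestamps a rescheduling
 * IPI from the moment it is sent until it is handled (see litmus/trace.h,
 * which is not included in this diff). */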

/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must
 * hold the lock that is used to serialize scheduling decisions. */
void litmus_reschedule(int cpu)
{
	int picked_transition_ok = 0;
	int scheduled_transition_ok = 0;

	/* The (remote) CPU could be in any state. */

	/* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU
	 * is not aware of the need to reschedule at this point. */

	/* is a context switch in progress? */
	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
		picked_transition_ok = sched_state_transition_on(
			cpu, TASK_PICKED, PICKED_WRONG_TASK);

	if (!picked_transition_ok &&
	    cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
		/* We either raced with the end of the context switch, or the
		 * CPU was in TASK_SCHEDULED anyway. */
		scheduled_transition_ok = sched_state_transition_on(
			cpu, TASK_SCHEDULED, SHOULD_SCHEDULE);
	}

	/* If the CPU was in state TASK_SCHEDULED, then we need to cause the
	 * scheduler to be invoked. */
	if (scheduled_transition_ok) {
		if (smp_processor_id() == cpu) {
			set_tsk_need_resched(current);
		} else {
			TS_SEND_RESCHED_START(cpu);
			smp_send_reschedule(cpu);
		}
	}

	TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
		    __func__,
		    picked_transition_ok,
		    scheduled_transition_ok);
}
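
/* Illustrative usage (hypothetical, not part of this commit): a plugin that
 * serializes its scheduling decisions with a single lock might preempt a
 * remote CPU roughly like this; cluster_lock, requeue(), and
 * find_cpu_to_preempt() are made-up names standing in for plugin-specific
 * code:
 *
 *	raw_spin_lock(&cluster_lock);
 *	requeue(new_task);
 *	litmus_reschedule(find_cpu_to_preempt());
 *	raw_spin_unlock(&cluster_lock);
 */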

void litmus_reschedule_local(void)
{
	if (is_in_sched_state(TASK_PICKED))
		set_sched_state(PICKED_WRONG_TASK);
	else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
		set_sched_state(WILL_SCHEDULE);
		set_tsk_need_resched(current);
	}
}

#ifdef CONFIG_DEBUG_KERNEL

void sched_state_plugin_check(void)
{
	if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) {
		TRACE("!!!! plugin did not call sched_state_task_picked()! "
		      "Calling sched_state_task_picked() is mandatory---fix this.\n");
		set_sched_state(TASK_PICKED);
	}
}

#define NAME_CHECK(x) case x: return #x
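/* For example, NAME_CHECK(TASK_PICKED) expands to
 *	case TASK_PICKED: return "TASK_PICKED";
 * via the preprocessor's stringification operator (#x). */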
const char* sched_state_name(int s)
{
	switch (s) {
		NAME_CHECK(TASK_SCHEDULED);
		NAME_CHECK(SHOULD_SCHEDULE);
		NAME_CHECK(WILL_SCHEDULE);
		NAME_CHECK(TASK_PICKED);
		NAME_CHECK(PICKED_WRONG_TASK);
	default:
		return "UNKNOWN";
	}
}

#endif