#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/preempt.h>

/* The rescheduling state of each processor. */
DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
/* No-op stubs for the tracing macros used below; TRACE_STATE() itself is
 * provided by <litmus/preempt.h>. */
#define TRACE_TASK(t, fmt, args...) /* ignore */
#define TRACE(fmt, args...) /* ignore */
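
/* Overview (added for this listing; inferred from the transitions implemented
 * below): each CPU's resched_state records where that CPU is in its
 * scheduling loop.  Roughly:
 *
 *   TASK_SCHEDULED  --(remote litmus_reschedule())-->        SHOULD_SCHEDULE
 *   SHOULD_SCHEDULE --(IPI / local set_tsk_need_resched())-> WILL_SCHEDULE
 *   WILL_SCHEDULE   --(plugin calls sched_state_task_picked())--> TASK_PICKED
 *   TASK_PICKED     --(another reschedule request)-->        PICKED_WRONG_TASK
 *   TASK_PICKED / PICKED_WRONG_TASK --(context switch done)--> TASK_SCHEDULED
 *
 * sched_state_will_schedule() below is invoked from set_tsk_need_resched()
 * in the LITMUS^RT-patched kernel, as its trace message suggests.
 */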
void sched_state_will_schedule(struct task_struct* tsk)
{
        /* Litmus hack: we only care about processor-local invocations of
         * set_tsk_need_resched(). We can't reliably set the flag remotely
         * since it might race with other updates to the scheduling state. We
         * can't rely on the runqueue lock protecting updates to the sched
         * state since processors do not acquire the runqueue locks for all
         * updates to the sched state (to avoid acquiring two runqueue locks
         * at the same time). Further, if tsk is residing on a remote
         * processor, then that processor doesn't actually know yet that it is
         * going to reschedule; it still must receive an IPI (unless a local
         * invocation races).
         */
        if (likely(task_cpu(tsk) == smp_processor_id())) {
                VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE |
                                   TASK_PICKED | WILL_SCHEDULE);
                if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
                        set_sched_state(PICKED_WRONG_TASK);
                else
                        set_sched_state(WILL_SCHEDULE);
        } else
                /* Litmus tasks should never be subject to a remote
                 * set_tsk_need_resched(). */
                BUG_ON(is_realtime(tsk));

        TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
                   __builtin_return_address(0));
}

/* Called by the IPI handler after another CPU called smp_send_reschedule(). */
void sched_state_ipi(void)
{
        /* If the IPI was slow, we might be in any state right now. The IPI is
         * only meaningful if we are in SHOULD_SCHEDULE. */
        if (is_in_sched_state(SHOULD_SCHEDULE)) {
                /* Cause the scheduler to be invoked.
                 * This will cause a transition to WILL_SCHEDULE. */
                set_tsk_need_resched(current);
                TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n",
                            current->comm, current->pid);
        } else {
                /* ignore */
                TRACE_STATE("ignoring IPI in state %x (%s)\n",
                            get_sched_state(),
                            sched_state_name(get_sched_state()));
        }
}

/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must
 * hold the lock that is used to serialize scheduling decisions. */
void litmus_reschedule(int cpu)
{
        int picked_transition_ok = 0;
        int scheduled_transition_ok = 0;

        /* The (remote) CPU could be in any state. */

        /* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU
         * is not aware of the need to reschedule at this point. */

        /* is a context switch in progress? */
        if (cpu_is_in_sched_state(cpu, TASK_PICKED))
                picked_transition_ok = sched_state_transition_on(
                        cpu, TASK_PICKED, PICKED_WRONG_TASK);

        if (!picked_transition_ok &&
            cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
                /* We either raced with the end of the context switch, or the
                 * CPU was in TASK_SCHEDULED anyway. */
                scheduled_transition_ok = sched_state_transition_on(
                        cpu, TASK_SCHEDULED, SHOULD_SCHEDULE);
        }

        /* If the CPU was in state TASK_SCHEDULED, then we need to cause the
         * scheduler to be invoked. */
        if (scheduled_transition_ok) {
                if (smp_processor_id() == cpu)
                        set_tsk_need_resched(current);
                else
                        smp_send_reschedule(cpu);
        }

        TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
                    __FUNCTION__,
                    picked_transition_ok,
                    scheduled_transition_ok);
}
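
/*
 * Illustrative sketch (not part of the original file): how a plugin might
 * invoke litmus_reschedule().  The lock and helper names are hypothetical;
 * the point is that the lock serializing scheduling decisions must be held
 * across both the decision and the reschedule request:
 *
 *      raw_spin_lock(&my_plugin_lock);
 *      if (preemption_needed_on(cpu))      // hypothetical plugin check
 *              litmus_reschedule(cpu);     // transitions state, may send IPI
 *      raw_spin_unlock(&my_plugin_lock);
 */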

void litmus_reschedule_local(void)
{
        if (is_in_sched_state(TASK_PICKED))
                set_sched_state(PICKED_WRONG_TASK);
        else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
                set_sched_state(WILL_SCHEDULE);
                set_tsk_need_resched(current);
        }
}
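
/* Note (added for this listing): litmus_reschedule_local() above is the
 * local-CPU variant of litmus_reschedule().  Since the caller already runs on
 * the CPU that must reschedule, no IPI is needed; setting the local
 * TIF_NEED_RESCHED flag via set_tsk_need_resched(current) suffices. */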

#ifdef CONFIG_DEBUG_KERNEL

void sched_state_plugin_check(void)
{
        if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) {
                TRACE("!!!! plugin did not call sched_state_task_picked()! "
                      "Calling sched_state_task_picked() is mandatory---fix this.\n");
                set_sched_state(TASK_PICKED);
        }
}

#define NAME_CHECK(x) case x: return #x
const char* sched_state_name(int s)
{
        switch (s) {
                NAME_CHECK(TASK_SCHEDULED);
                NAME_CHECK(SHOULD_SCHEDULE);
                NAME_CHECK(WILL_SCHEDULE);
                NAME_CHECK(TASK_PICKED);
                NAME_CHECK(PICKED_WRONG_TASK);
        default:
                return "UNKNOWN";
        }
}

#endif