Diffstat (limited to 'include/litmus/preempt.h')
-rw-r--r--  include/litmus/preempt.h  165
1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..f3cf29ad87ee
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,165 @@
#ifndef LITMUS_PREEMPT_H
#define LITMUS_PREEMPT_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <asm/atomic.h>

#include <litmus/debug_trace.h>

extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);

//#ifdef CONFIG_DEBUG_KERNEL
#if 0
const char* sched_state_name(int s);
#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
#else
#define TRACE_STATE(fmt, args...) /* ignore */
#endif

#define VERIFY_SCHED_STATE(x)                                           \
        do { int __s = get_sched_state();                               \
                if ((__s & (x)) == 0)                                   \
                        TRACE_STATE("INVALID s=0x%x (%s) not "          \
                                    "in 0x%x (%s) [%s]\n",              \
                                    __s, sched_state_name(__s),         \
                                    (x), #x, __FUNCTION__);             \
        } while (0);

#define TRACE_SCHED_STATE_CHANGE(x, y, cpu)                             \
        TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n",                   \
                    cpu, (x), sched_state_name(x),                      \
                    (y), sched_state_name(y))


typedef enum scheduling_state {
        TASK_SCHEDULED    = (1 << 0),  /* The currently scheduled task is the one that
                                        * should be scheduled, and the processor does not
                                        * plan to invoke schedule(). */
        SHOULD_SCHEDULE   = (1 << 1),  /* A remote processor has determined that the
                                        * processor should reschedule, but this has not
                                        * been communicated yet (IPI still pending). */
        WILL_SCHEDULE     = (1 << 2),  /* The processor has noticed that it has to
                                        * reschedule and will do so shortly. */
        TASK_PICKED       = (1 << 3),  /* The processor is currently executing schedule(),
                                        * has selected a new task to schedule, but has not
                                        * yet performed the actual context switch. */
        PICKED_WRONG_TASK = (1 << 4),  /* The processor has not yet performed the context
                                        * switch, but a remote processor has already
                                        * determined that a higher-priority task became
                                        * eligible after the task was picked. */
} sched_state_t;
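
/* Summary of the intended state flow, as described by the comments above and
 * realized by the helpers below: a CPU normally sits in TASK_SCHEDULED; a
 * remote reschedule request moves it to SHOULD_SCHEDULE until the IPI is
 * processed, then to WILL_SCHEDULE; schedule() moves it to TASK_PICKED once a
 * task has been selected, and back to TASK_SCHEDULED once the context switch
 * is validated.  If a remote CPU finds a higher-priority task in the
 * meantime, the state becomes PICKED_WRONG_TASK and the CPU falls back to
 * WILL_SCHEDULE to pick again. */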

static inline sched_state_t get_sched_state_on(int cpu)
{
        return atomic_read(&per_cpu(resched_state, cpu));
}

static inline sched_state_t get_sched_state(void)
{
        return atomic_read(&__get_cpu_var(resched_state));
}

static inline int is_in_sched_state(int possible_states)
{
        return get_sched_state() & possible_states;
}

static inline int cpu_is_in_sched_state(int cpu, int possible_states)
{
        return get_sched_state_on(cpu) & possible_states;
}

static inline void set_sched_state(sched_state_t s)
{
        TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id());
        atomic_set(&__get_cpu_var(resched_state), s);
}

static inline int sched_state_transition(sched_state_t from, sched_state_t to)
{
        sched_state_t old_state;

        old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to);
        if (old_state == from) {
                TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id());
                return 1;
        } else
                return 0;
}

static inline int sched_state_transition_on(int cpu,
                                            sched_state_t from,
                                            sched_state_t to)
{
        sched_state_t old_state;

        old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to);
        if (old_state == from) {
                TRACE_SCHED_STATE_CHANGE(from, to, cpu);
                return 1;
        } else
                return 0;
}

/* Plugins must call this function after they have decided which job to
 * schedule next.  IMPORTANT: this function must be called while still holding
 * the lock that is used to serialize scheduling decisions.
 *
 * (Ideally, we would like to use runqueue locks for this purpose, but that
 * would lead to deadlocks with the migration code.)
 */
static inline void sched_state_task_picked(void)
{
        VERIFY_SCHED_STATE(WILL_SCHEDULE);

        /* WILL_SCHEDULE has only a local transition => simple store is ok */
        set_sched_state(TASK_PICKED);
}

static inline void sched_state_entered_schedule(void)
{
        /* Update state for the case that we entered schedule() not due to
         * set_tsk_need_resched() */
        set_sched_state(WILL_SCHEDULE);
}

/* Called by schedule() to check if the scheduling decision is still valid
 * after a context switch.  Returns 1 if the CPU needs to reschedule. */
static inline int sched_state_validate_switch(void)
{
        int left_state_ok = 0;

        VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED);

        if (is_in_sched_state(TASK_PICKED)) {
                /* Might be good; let's try to transition out of this
                 * state.  This must be done atomically since remote processors
                 * may try to change the state, too. */
                left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED);
        }

        if (!left_state_ok) {
                /* We raced with a higher-priority task arrival => not
                 * valid.  The CPU needs to reschedule. */
                set_sched_state(WILL_SCHEDULE);
                return 1;
        } else
                return 0;
}

/* State transition events.  See litmus/preempt.c for details. */
void sched_state_will_schedule(struct task_struct* tsk);
void sched_state_ipi(void);
/* Cause a CPU (remote or local) to reschedule. */
void litmus_reschedule(int cpu);
void litmus_reschedule_local(void);

#ifdef CONFIG_DEBUG_KERNEL
void sched_state_plugin_check(void);
#else
#define sched_state_plugin_check() /* no check */
#endif

#endif
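
For orientation, the following is a minimal sketch of how a scheduler plugin
and the core scheduling path might drive the state machine declared above.
Only the sched_state_*() calls come from this header; demo_lock,
demo_plugin_schedule(), demo_schedule_loop(), and pick_next_job() are
hypothetical names used purely for illustration.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <litmus/preempt.h>

/* Hypothetical lock serializing scheduling decisions. */
static DEFINE_RAW_SPINLOCK(demo_lock);

/* Hypothetical plugin-specific policy; assumed to return the next job. */
static struct task_struct* pick_next_job(struct task_struct* prev);

/* Hypothetical plugin schedule() callback: pick the next job while holding
 * the decision lock and report the pick to the preemption state machine. */
static struct task_struct* demo_plugin_schedule(struct task_struct* prev)
{
        struct task_struct* next;

        raw_spin_lock(&demo_lock);
        next = pick_next_job(prev);

        /* Per the comment above sched_state_task_picked(): call while still
         * holding the lock, so that a remote CPU releasing a higher-priority
         * job afterwards observes TASK_PICKED and can flag PICKED_WRONG_TASK. */
        sched_state_task_picked();

        raw_spin_unlock(&demo_lock);
        return next;
}

/* Hypothetical core scheduling path around the plugin callback. */
static void demo_schedule_loop(struct task_struct* prev)
{
        struct task_struct* next;

        do {
                sched_state_entered_schedule();         /* -> WILL_SCHEDULE  */
                next = demo_plugin_schedule(prev);      /* -> TASK_PICKED    */
                /* ... the context switch to 'next' would happen here ...    */
        } while (sched_state_validate_switch());        /* 1 => pick again   */
}

The loop form mirrors the contract stated above: when
sched_state_validate_switch() returns 1, the picked task was invalidated by a
concurrent higher-priority arrival, the state has already been reset to
WILL_SCHEDULE, and the CPU must make a new scheduling decision.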