Diffstat (limited to 'include')
-rw-r--r--  include/linux/sched.h       2
-rw-r--r--  include/litmus/preempt.h  164
2 files changed, 166 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 708523b06920..c9ac4fc837ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 #include <litmus/rt_param.h>
+#include <litmus/preempt.h>
 
 struct exec_domain;
 struct futex_pi_state;
@@ -2301,6 +2302,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..260c6fe17986
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,164 @@
+#ifndef LITMUS_PREEMPT_H
+#define LITMUS_PREEMPT_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+#include <litmus/debug_trace.h>
+
+extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
+
+#ifdef CONFIG_DEBUG_KERNEL
+const char* sched_state_name(int s);
+#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
+#else
+#define TRACE_STATE(fmt, args...) /* ignore */
+#endif
+
+#define VERIFY_SCHED_STATE(x)						\
+	do { int __s = get_sched_state();				\
+		if ((__s & (x)) == 0)					\
+			TRACE_STATE("INVALID s=0x%x (%s) not "		\
+				    "in 0x%x (%s) [%s]\n",		\
+				    __s, sched_state_name(__s),		\
+				    (x), #x, __FUNCTION__);		\
+	} while (0)
+
+#define TRACE_SCHED_STATE_CHANGE(x, y, cpu)				\
+	TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n",			\
+		    cpu, (x), sched_state_name(x),			\
+		    (y), sched_state_name(y))
+
+
+typedef enum scheduling_state {
+	TASK_SCHEDULED    = (1 << 0),  /* The currently scheduled task is the one
+					* that should be scheduled, and the processor
+					* does not plan to invoke schedule(). */
+	SHOULD_SCHEDULE   = (1 << 1),  /* A remote processor has determined that the
+					* processor should reschedule, but this has
+					* not been communicated yet (IPI still
+					* pending). */
+	WILL_SCHEDULE     = (1 << 2),  /* The processor has noticed that it has to
+					* reschedule and will do so shortly. */
+	TASK_PICKED       = (1 << 3),  /* The processor is currently executing
+					* schedule(), has selected a new task to
+					* schedule, but has not yet performed the
+					* actual context switch. */
+	PICKED_WRONG_TASK = (1 << 4),  /* The processor has not yet performed the
+					* context switch, but a remote processor has
+					* already determined that a higher-priority
+					* task became eligible after the task was
+					* picked. */
+} sched_state_t;
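
Taken together, the five states form a small per-CPU state machine. The following comment block sketches the transition paths as they can be inferred from the state descriptions and the helpers below; it is an editorial summary, not part of the patch:

/* Typical life cycle of the per-CPU scheduling state:
 *
 *   TASK_SCHEDULED    --- remote CPU requests a preemption ---> SHOULD_SCHEDULE
 *   SHOULD_SCHEDULE   --- reschedule IPI is processed      ---> WILL_SCHEDULE
 *   WILL_SCHEDULE     --- schedule() picks the next task   ---> TASK_PICKED
 *   TASK_PICKED       --- context switch is validated      ---> TASK_SCHEDULED
 *   TASK_PICKED       --- a better task arrives remotely   ---> PICKED_WRONG_TASK
 *   PICKED_WRONG_TASK --- validation fails, must retry     ---> WILL_SCHEDULE
 */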
+
+static inline sched_state_t get_sched_state_on(int cpu)
+{
+	return atomic_read(&per_cpu(resched_state, cpu));
+}
+
+static inline sched_state_t get_sched_state(void)
+{
+	return atomic_read(&__get_cpu_var(resched_state));
+}
+
+static inline int is_in_sched_state(int possible_states)
+{
+	return get_sched_state() & possible_states;
+}
+
+static inline int cpu_is_in_sched_state(int cpu, int possible_states)
+{
+	return get_sched_state_on(cpu) & possible_states;
+}
+
+static inline void set_sched_state(sched_state_t s)
+{
+	TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id());
+	atomic_set(&__get_cpu_var(resched_state), s);
+}
+
+static inline int sched_state_transition(sched_state_t from, sched_state_t to)
+{
+	sched_state_t old_state;
+
+	old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to);
+	if (old_state == from) {
+		TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id());
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int sched_state_transition_on(int cpu,
+					    sched_state_t from,
+					    sched_state_t to)
+{
+	sched_state_t old_state;
+
+	old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to);
+	if (old_state == from) {
+		TRACE_SCHED_STATE_CHANGE(from, to, cpu);
+		return 1;
+	} else
+		return 0;
+}
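
The cmpxchg-based helpers make cross-CPU requests race-free: only the caller that wins the state transition sends the IPI. A minimal sketch of how a remote preemption request could use sched_state_transition_on() follows; the real logic lives in litmus_reschedule() in litmus/preempt.c, and example_remote_resched() is a hypothetical name:

/* Sketch: flag a remote CPU for rescheduling exactly once.
 * Assumes the caller has decided that @cpu must pick a new task. */
static void example_remote_resched(int cpu)
{
	/* Only the winner of the TASK_SCHEDULED -> SHOULD_SCHEDULE
	 * race sends the reschedule IPI. */
	if (sched_state_transition_on(cpu, TASK_SCHEDULED, SHOULD_SCHEDULE))
		smp_send_reschedule(cpu);
}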
+
+/* Plugins must call this function after they have decided which job to
+ * schedule next.  IMPORTANT: this function must be called while still holding
+ * the lock that is used to serialize scheduling decisions.
+ *
+ * (Ideally, we would like to use runqueue locks for this purpose, but that
+ * would lead to deadlocks with the migration code.)
+ */
+static inline void sched_state_task_picked(void)
+{
+	VERIFY_SCHED_STATE(WILL_SCHEDULE);
+
+	/* WILL_SCHEDULE has only a local transition => simple store is ok */
+	set_sched_state(TASK_PICKED);
+}
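
For illustration, a plugin's scheduling function would call sched_state_task_picked() at the end of its serialized decision section, roughly as below; demo_lock, demo_schedule(), and demo_pick_next_task() are hypothetical placeholders, not part of LITMUS^RT:

static struct task_struct *demo_pick_next_task(void);	/* hypothetical */
static DEFINE_RAW_SPINLOCK(demo_lock);			/* hypothetical */

/* Sketch: a plugin schedule() callback. */
static struct task_struct *demo_schedule(struct task_struct *prev)
{
	struct task_struct *next;

	raw_spin_lock(&demo_lock);	/* serializes scheduling decisions */
	next = demo_pick_next_task();	/* hypothetical policy decision */
	sched_state_task_picked();	/* must precede the unlock */
	raw_spin_unlock(&demo_lock);

	return next;
}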
+
+static inline void sched_state_entered_schedule(void)
+{
+	/* Update state for the case that we entered schedule() not due to
+	 * set_tsk_need_resched(). */
+	set_sched_state(WILL_SCHEDULE);
+}
+
+/* Called by schedule() to check if the scheduling decision is still valid
+ * after a context switch.  Returns 1 if the CPU needs to reschedule. */
+static inline int sched_state_validate_switch(void)
+{
+	int left_state_ok = 0;
+
+	VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED);
+
+	if (is_in_sched_state(TASK_PICKED)) {
+		/* Might be good; let's try to transition out of this
+		 * state.  This must be done atomically since remote processors
+		 * may try to change the state, too. */
+		left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED);
+	}
+
+	if (!left_state_ok) {
+		/* We raced with a higher-priority task arrival => not
+		 * valid.  The CPU needs to reschedule. */
+		set_sched_state(WILL_SCHEDULE);
+		return 1;
+	} else
+		return 0;
+}
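
In the scheduler core, the intended calling pattern is a retry loop: enter, pick, switch, validate, and redo the decision if a remote CPU invalidated it in the meantime. A sketch of that pattern follows; the actual hook-up into kernel/sched.c happens in a separate patch, and example_schedule() is hypothetical:

/* Sketch: how schedule() is expected to drive the state machine. */
static void example_schedule(void)
{
again:
	sched_state_entered_schedule();	/* -> WILL_SCHEDULE */

	/* ... pick the next task and perform the context switch,
	 * passing through TASK_PICKED via sched_state_task_picked() ... */

	if (sched_state_validate_switch())
		goto again;	/* decision went stale; pick again */
}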
+
+/* State transition events.  See litmus/preempt.c for details. */
+void sched_state_will_schedule(struct task_struct* tsk);
+void sched_state_ipi(void);
+/* Cause a CPU (remote or local) to reschedule. */
+void litmus_reschedule(int cpu);
+void litmus_reschedule_local(void);
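
A plugin typically only needs the last two helpers; for example, a job-release handler that decides some CPU must be preempted would call litmus_reschedule() as sketched below, where preemption_needed() is a hypothetical plugin predicate:

static int preemption_needed(int cpu);	/* hypothetical plugin predicate */

/* Sketch: trigger a preemption after a job release. */
static void example_check_for_preemption(int cpu)
{
	if (preemption_needed(cpu))
		litmus_reschedule(cpu);	/* works for local and remote CPUs */
}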
+
+#ifdef CONFIG_DEBUG_KERNEL
+void sched_state_plugin_check(void);
+#else
+#define sched_state_plugin_check() /* no check */
+#endif
+
+#endif