Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r--   include/litmus/litmus.h   312
1 file changed, 312 insertions, 0 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 00000000000..6a1a59da6b5
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,312 @@
/*
 * Constant definitions related to
 * scheduling policy.
 */

#ifndef _LINUX_LITMUS_H_
#define _LINUX_LITMUS_H_

#include <litmus/debug_trace.h>

#ifdef CONFIG_RELEASE_MASTER
extern atomic_t release_master_cpu;
#endif

/* in_list - is a given list_head queued on some list?
 */
static inline int in_list(struct list_head* list)
{
	return !( /* case 1: deleted */
		  (list->next == LIST_POISON1 &&
		   list->prev == LIST_POISON2)
		  ||
		  /* case 2: initialized */
		  (list->next == list &&
		   list->prev == list)
		);
}
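
/* A minimal usage sketch (the helper below is hypothetical and not part of
 * the original API): link a node onto a queue only if it is not already
 * queued somewhere. Assumes the usual <linux/list.h> semantics for
 * list_add_tail(). */
static inline void example_enqueue_if_unlinked(struct list_head *node,
					       struct list_head *queue)
{
	if (!in_list(node))
		list_add_tail(node, queue);
}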

struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq);

#define NO_CPU			0xffffffff

void litmus_fork(struct task_struct *tsk);
void litmus_exec(void);
/* clean up real-time state of a task */
void exit_litmus(struct task_struct *dead_tsk);

long litmus_admit_task(struct task_struct *tsk);
void litmus_exit_task(struct task_struct *tsk);

#define is_realtime(t) 		((t)->policy == SCHED_LITMUS)
#define rt_transition_pending(t) \
	((t)->rt_param.transition_pending)

#define tsk_rt(t)		(&(t)->rt_param)

/* Realtime utility macros */
#define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
#define get_boost_start(t)	(tsk_rt(t)->boost_start_time)

/* task_params macros */
#define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
#define get_rt_period(t)	(tsk_rt(t)->task_params.period)
#define get_rt_relative_deadline(t)	(tsk_rt(t)->task_params.relative_deadline)
#define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
#define get_partition(t)	(tsk_rt(t)->task_params.cpu)
#define get_priority(t)		(tsk_rt(t)->task_params.priority)
#define get_class(t)		(tsk_rt(t)->task_params.cls)
#define get_release_policy(t)	(tsk_rt(t)->task_params.release_policy)

/* job_param macros */
#define get_job_no(t)		(tsk_rt(t)->job_params.job_no)
#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
#define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
#define get_release(t)		(tsk_rt(t)->job_params.release)
#define get_lateness(t)		(tsk_rt(t)->job_params.lateness)

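/* Illustrative budget check (a sketch only; LITMUS^RT keeps a comparable
 * helper in its budget-enforcement code): the current job has exhausted its
 * budget once the execution time it has consumed reaches the provisioned
 * worst-case execution cost. */
static inline int example_budget_exhausted(struct task_struct *t)
{
	return get_exec_time(t) >= get_exec_cost(t);
}
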
/* release policy macros */
#define is_periodic(t)		(get_release_policy(t) == PERIODIC)
#define is_sporadic(t)		(get_release_policy(t) == SPORADIC)
#ifdef CONFIG_ALLOW_EARLY_RELEASE
#define is_early_releasing(t)	(get_release_policy(t) == EARLY)
#else
#define is_early_releasing(t)	(0)
#endif

#define is_hrt(t)     		\
	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
#define is_srt(t)     		\
	(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
#define is_be(t)      		\
	(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)

/* Our notion of time within LITMUS: kernel monotonic time. */
static inline lt_t litmus_clock(void)
{
	return ktime_to_ns(ktime_get());
}

/* A macro to convert from nanoseconds to ktime_t. */
#define ns_to_ktime(t)		ktime_add_ns(ktime_set(0, 0), t)

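/* A small usage sketch (hypothetical helper, not part of the original API):
 * compute the absolute release time of a job that becomes available 'delay'
 * nanoseconds from now and convert it to ktime_t, e.g. for programming an
 * hrtimer in absolute mode. */
static inline ktime_t example_release_ktime(lt_t delay)
{
	lt_t when = litmus_clock() + delay; /* absolute monotonic time, in ns */

	return ns_to_ktime(when);
}
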
#define get_domain(t)		(tsk_rt(t)->domain)

/* Honor the flag in the preempt_count variable that is set
 * when scheduling is in progress.
 */
#define is_running(t) 			\
	((t)->state == TASK_RUNNING || 	\
	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)

#define is_blocked(t)       \
	(!is_running(t))
#define is_released(t, now)	\
	(lt_before_eq(get_release(t), now))
#define is_tardy(t, now)    \
	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))

/* real-time comparison macros */
#define earlier_deadline(a, b) (lt_before(\
	(a)->rt_param.job_params.deadline,\
	(b)->rt_param.job_params.deadline))
#define earlier_release(a, b)  (lt_before(\
	(a)->rt_param.job_params.release,\
	(b)->rt_param.job_params.release))
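
/* Illustrative EDF-style priority check (a sketch only; the scheduler
 * plugins implement their own, more complete comparisons with additional
 * tie-breaks): 'a' beats 'b' if its absolute deadline is earlier, with the
 * release time as a simple tie-breaker for equal deadlines. */
static inline int example_edf_higher_prio(struct task_struct *a,
					  struct task_struct *b)
{
	return earlier_deadline(a, b) ||
	       (get_deadline(a) == get_deadline(b) && earlier_release(a, b));
}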

void preempt_if_preemptable(struct task_struct* t, int on_cpu);

#ifdef CONFIG_LITMUS_LOCKING
void srp_ceiling_block(void);
#else
#define srp_ceiling_block() /* nothing */
#endif

#define bheap2task(hn) ((struct task_struct*) hn->value)

#ifdef CONFIG_NP_SECTION

static inline int is_kernel_np(struct task_struct *t)
{
	return tsk_rt(t)->kernel_np;
}

static inline int is_user_np(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	if (is_user_np(t)) {
		/* Set the flag that tells user space to call
		 * into the kernel at the end of a critical section. */
		if (likely(tsk_rt(t)->ctrl_page)) {
			TRACE_TASK(t, "setting delayed_preemption flag\n");
			tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
		}
	}
}
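
/* For illustration only, the matching user-space side of this handshake
 * (liblitmus-style pseudocode; the exact names and details below are
 * assumptions, not part of this header):
 *
 *	ctrl->sched.np.flag = 1;	// enter non-preemptive section
 *	... critical section ...
 *	ctrl->sched.np.flag = 0;	// leave non-preemptive section
 *	if (ctrl->sched.np.preempt)	// the kernel deferred a preemption
 *		sched_yield();		// give up the CPU now
 */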

static inline void make_np(struct task_struct *t)
{
	tsk_rt(t)->kernel_np++;
}

/* The caller should check whether a preemption is necessary when
 * this function returns 0.
 */
static inline int take_np(struct task_struct *t)
{
	return --tsk_rt(t)->kernel_np;
}

/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
static inline int request_exit_np_atomic(struct task_struct *t)
{
	union np_flag old, new;

	if (tsk_rt(t)->ctrl_page) {
		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
		if (old.np.flag == 0) {
			/* no longer non-preemptive */
			return 0;
		} else if (old.np.preempt) {
			/* already set, nothing for us to do */
			return 1;
		} else {
			/* non preemptive and flag not set */
			new.raw = old.raw;
			new.np.preempt = 1;
			/* if we get old back, then we atomically set the flag */
			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
			/* If we raced with a concurrent change, then so be
			 * it. Deliver it by IPI. We don't want an unbounded
			 * retry loop here since tasks might exploit that to
			 * keep the kernel busy indefinitely. */
		}
	} else
		return 0;
}

#else

static inline int is_kernel_np(struct task_struct* t)
{
	return 0;
}

static inline int is_user_np(struct task_struct* t)
{
	return 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
	BUG();
}

static inline int request_exit_np_atomic(struct task_struct *t)
{
	return 0;
}

#endif
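
/* Illustrative use on a remote-preemption path (a sketch under the
 * assumption that the caller knows which CPU 't' is currently running on;
 * smp_send_reschedule() is the ordinary kernel reschedule IPI): */
static inline void example_preempt_or_flag(struct task_struct *t, int cpu)
{
	if (!request_exit_np_atomic(t))
		/* task is preemptable, or the flag could not be set
		 * atomically: fall back to an IPI */
		smp_send_reschedule(cpu);
}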

static inline void clear_exit_np(struct task_struct *t)
{
	if (likely(tsk_rt(t)->ctrl_page))
		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
}

static inline int is_np(struct task_struct *t)
{
#ifdef CONFIG_SCHED_DEBUG_TRACE
	int kernel, user;
	kernel = is_kernel_np(t);
	user   = is_user_np(t);
	if (kernel || user)
		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
			   kernel, user);
	return kernel || user;
#else
	return unlikely(is_kernel_np(t) || is_user_np(t));
#endif
}

static inline int is_present(struct task_struct* t)
{
	return t && tsk_rt(t)->present;
}

static inline int is_completed(struct task_struct* t)
{
	return t && tsk_rt(t)->completed;
}


/* make the unit explicit */
typedef unsigned long quanta_t;

enum round {
	FLOOR,
	CEIL
};


/* Tick period is used to convert ns-specified execution
 * costs and periods into tick-based equivalents.
 */
extern ktime_t tick_period;

static inline quanta_t time2quanta(lt_t time, enum round round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}
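
/* Worked example: with a 1 ms quantum (quantum_length == 1000000 ns),
 * time2quanta(2500000, FLOOR) yields 2 quanta and time2quanta(2500000, CEIL)
 * yields 3, while an exact multiple such as 2000000 ns yields 2 either way,
 * since do_div() leaves the quotient in 'time' and returns the remainder. */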

/* By how much is cpu staggered behind CPU 0? */
u64 cpu_stagger_offset(int cpu);

static inline struct control_page* get_control_page(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page;
}

static inline int has_control_page(struct task_struct* t)
{
	return tsk_rt(t)->ctrl_page != NULL;
}


#ifdef CONFIG_SCHED_OVERHEAD_TRACE

#define TS_SYSCALL_IN_START						\
	if (has_control_page(current)) {				\
		__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \
	}

#define TS_SYSCALL_IN_END						\
	if (has_control_page(current)) {				\
		uint64_t irqs;						\
		local_irq_disable();					\
		irqs = get_control_page(current)->irq_count -		\
			get_control_page(current)->irq_syscall_start;	\
		__TS_SYSCALL_IN_END(&irqs);				\
		local_irq_enable();					\
	}

#else

#define TS_SYSCALL_IN_START
#define TS_SYSCALL_IN_END

#endif
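
/* Intended usage (illustrative, inferred from the macros above): a
 * LITMUS^RT system-call entry path would issue TS_SYSCALL_IN_START first,
 * which picks up the start timestamp user space stored in the control page,
 * and then TS_SYSCALL_IN_END, which closes the measurement and passes along
 * how many interrupts occurred since the syscall started so the overhead
 * sample can be filtered. */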

#endif