author	Bjoern Brandenburg <bbb@mpi-sws.org>	2013-06-25 01:27:07 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2014-06-07 05:30:42 -0400
commit	2edfb17682026b6a1efcd67d4ec9bb3f75b02d3b (patch)
tree	b49af42cb00ace1be3c3ab1920688db9c08933c6 /include/litmus/litmus.h
parent	493f1cfd648d8b50276b61532bea8b862308a4a1 (diff)
Add LITMUS^RT core implementation
This patch adds the core of LITMUS^RT:

- library functionality (heaps, rt_domain, prioritization, etc.)
- budget enforcement logic
- job management
- system call backends
- virtual devices (control page, etc.)
- scheduler plugin API (and dummy plugin)

This code compiles, but is not yet integrated with the rest of Linux.
Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r--	include/litmus/litmus.h	268
1 file changed, 268 insertions, 0 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c87863c9b231..0519831f6878 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -6,7 +6,49 @@
 #ifndef _LINUX_LITMUS_H_
 #define _LINUX_LITMUS_H_
 
+#include <litmus/debug_trace.h>
+
+#ifdef CONFIG_RELEASE_MASTER
+extern atomic_t release_master_cpu;
+#endif
+
+/* in_list - is a given list_head queued on some list?
+ */
+static inline int in_list(struct list_head* list)
+{
+	return !( /* case 1: deleted */
+		  (list->next == LIST_POISON1 &&
+		   list->prev == LIST_POISON2)
+		  ||
+		  /* case 2: initialized */
+		  (list->next == list &&
+		   list->prev == list)
+		);
+}
+
+struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
+
+#define NO_CPU 0xffffffff
+
+void litmus_fork(struct task_struct *tsk);
+void litmus_exec(void);
+/* clean up real-time state of a task */
+void litmus_clear_state(struct task_struct *dead_tsk);
+void exit_litmus(struct task_struct *dead_tsk);
+
+/* Prevent the plugin from being switched-out from underneath a code
+ * path. Might sleep, so may be called only from non-atomic context. */
+void litmus_plugin_switch_disable(void);
+void litmus_plugin_switch_enable(void);
+
+long litmus_admit_task(struct task_struct *tsk);
+void litmus_exit_task(struct task_struct *tsk);
+void litmus_dealloc(struct task_struct *tsk);
+void litmus_do_exit(struct task_struct *tsk);
+
 #define is_realtime(t) ((t)->policy == SCHED_LITMUS)
+#define rt_transition_pending(t) \
+	((t)->rt_param.transition_pending)
 
 #define tsk_rt(t) (&(t)->rt_param)
 
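The plugin-switch guards declared in this hunk are meant to bracket any code path that must not observe a plugin change while it runs. A minimal usage sketch, not part of the patch (the function name and body are made up for illustration; only the declarations above are assumed):

static long inspect_active_plugin_example(struct task_struct *tsk)
{
	long ret;

	/* Might sleep, so this may only be called from non-atomic context. */
	litmus_plugin_switch_disable();

	/* ... safely inspect or call into the currently active plugin here ... */
	ret = is_realtime(tsk) ? 0 : -EINVAL;

	litmus_plugin_switch_enable();
	return ret;
}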
@@ -28,6 +70,7 @@
 #define get_partition(t) (tsk_rt(t)->task_params.cpu)
 #define get_priority(t) (tsk_rt(t)->task_params.priority)
 #define get_class(t) (tsk_rt(t)->task_params.cls)
+#define get_release_policy(t) (tsk_rt(t)->task_params.release_policy)
 
 /* job_param macros */
 #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
@@ -35,6 +78,15 @@
 #define get_release(t) (tsk_rt(t)->job_params.release)
 #define get_lateness(t) (tsk_rt(t)->job_params.lateness)
 
+/* release policy macros */
+#define is_periodic(t) (get_release_policy(t) == TASK_PERIODIC)
+#define is_sporadic(t) (get_release_policy(t) == TASK_SPORADIC)
+#ifdef CONFIG_ALLOW_EARLY_RELEASE
+#define is_early_releasing(t) (get_release_policy(t) == TASK_EARLY)
+#else
+#define is_early_releasing(t) (0)
+#endif
+
 #define is_hrt(t) \
 	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
 #define is_srt(t) \
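The release-policy macros added in this hunk classify how jobs of a task are released. A short sketch of how a release path might branch on them, not part of the patch (the function name is hypothetical; the actual job-management logic lives elsewhere in this commit, and is_released()/litmus_clock() are defined further down in this header):

static int job_eligible_now_example(struct task_struct *t)
{
	lt_t now = litmus_clock();

	/* TASK_EARLY jobs may start ahead of their formal release time. */
	if (is_early_releasing(t))
		return 1;

	/* Periodic and sporadic jobs must wait for their release time. */
	return is_released(t, now);
}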
@@ -48,6 +100,196 @@ static inline lt_t litmus_clock(void)
 	return ktime_to_ns(ktime_get());
 }
 
+/* A macro to convert from nanoseconds to ktime_t. */
+#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
+
+#define get_domain(t) (tsk_rt(t)->domain)
+
+/* Honor the flag in the preempt_count variable that is set
+ * when scheduling is in progress.
+ */
+#define is_running(t) \
+	((t)->state == TASK_RUNNING || \
+	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
+
+#define is_blocked(t) \
+	(!is_running(t))
+#define is_released(t, now) \
+	(lt_before_eq(get_release(t), now))
+#define is_tardy(t, now) \
+	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))
+
+/* real-time comparison macros */
+#define earlier_deadline(a, b) (lt_before(\
+	(a)->rt_param.job_params.deadline,\
+	(b)->rt_param.job_params.deadline))
+#define earlier_release(a, b) (lt_before(\
+	(a)->rt_param.job_params.release,\
+	(b)->rt_param.job_params.release))
+
+void preempt_if_preemptable(struct task_struct* t, int on_cpu);
+
+#ifdef CONFIG_LITMUS_LOCKING
+void srp_ceiling_block(void);
+#else
+#define srp_ceiling_block() /* nothing */
+#endif
+
+#define bheap2task(hn) ((struct task_struct*) hn->value)
+
+#ifdef CONFIG_NP_SECTION
+
+static inline int is_kernel_np(struct task_struct *t)
+{
+	return tsk_rt(t)->kernel_np;
+}
+
+static inline int is_user_np(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	if (is_user_np(t)) {
+		/* Set the flag that tells user space to call
+		 * into the kernel at the end of a critical section. */
+		if (likely(tsk_rt(t)->ctrl_page)) {
+			TRACE_TASK(t, "setting delayed_preemption flag\n");
+			tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
+		}
+	}
+}
+
+static inline void make_np(struct task_struct *t)
+{
+	tsk_rt(t)->kernel_np++;
+}
+
+/* Caller should check if preemption is necessary when
+ * the function returns 0.
+ */
+static inline int take_np(struct task_struct *t)
+{
+	return --tsk_rt(t)->kernel_np;
+}
+
+/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	union np_flag old, new;
+
+	if (tsk_rt(t)->ctrl_page) {
+		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
+		if (old.np.flag == 0) {
+			/* no longer non-preemptive */
+			return 0;
+		} else if (old.np.preempt) {
+			/* already set, nothing for us to do */
+			return 1;
+		} else {
+			/* non-preemptive and flag not set */
+			new.raw = old.raw;
+			new.np.preempt = 1;
+			/* if we get old back, then we atomically set the flag */
+			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			/* If we raced with a concurrent change, then so be
+			 * it. Deliver it by IPI. We don't want an unbounded
+			 * retry loop here since tasks might exploit that to
+			 * keep the kernel busy indefinitely. */
+		}
+	} else
+		return 0;
+}
+
+#else
+
+static inline int is_kernel_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline int is_user_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
+	BUG();
+}
+
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	return 0;
+}
+
+#endif
+
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
+}
+
+static inline int is_np(struct task_struct *t)
+{
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	int kernel, user;
+	kernel = is_kernel_np(t);
+	user = is_user_np(t);
+	if (kernel || user)
+		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
+			   kernel, user);
+	return kernel || user;
+#else
+	return unlikely(is_kernel_np(t) || is_user_np(t));
+#endif
+}
+
+static inline int is_present(struct task_struct* t)
+{
+	return t && tsk_rt(t)->present;
+}
+
+static inline int is_completed(struct task_struct* t)
+{
+	return t && tsk_rt(t)->completed;
+}
+
+
+/* Used to convert ns-specified execution costs and periods into
+ * integral quanta equivalents.
+ */
+#define LITMUS_QUANTUM_LENGTH_NS (CONFIG_LITMUS_QUANTUM_LENGTH_US * 1000ULL)
+
+/* make the unit explicit */
+typedef unsigned long quanta_t;
+
+enum round {
+	FLOOR,
+	CEIL
+};
+
+static inline quanta_t time2quanta(lt_t time, enum round round)
+{
+	s64 quantum_length = LITMUS_QUANTUM_LENGTH_NS;
+
+	if (do_div(time, quantum_length) && round == CEIL)
+		time++;
+	return (quanta_t) time;
+}
+
+static inline lt_t quanta2time(quanta_t quanta)
+{
+	return quanta * LITMUS_QUANTUM_LENGTH_NS;
+}
+
+/* By how much is cpu staggered behind CPU 0? */
+u64 cpu_stagger_offset(int cpu);
+
 static inline struct control_page* get_control_page(struct task_struct *t)
 {
 	return tsk_rt(t)->ctrl_page;
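The non-preemptive-section helpers in this hunk are designed to cooperate with user space: rather than preempting a task that is inside a critical section, the scheduler sets the preempt flag and lets the task yield once it leaves the section. A sketch of that pattern, not part of the patch (the function name is hypothetical; real plugins add their own queueing and bookkeeping around it):

static void preempt_or_defer_example(struct task_struct *t, int cpu)
{
	if (is_np(t)) {
		/* Task is non-preemptive (kernel- or user-side): ask it to
		 * call back into the kernel once its critical section ends. */
		request_exit_np(t);
	} else {
		/* Safe to preempt immediately. */
		preempt_if_preemptable(t, cpu);
	}
}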
@@ -58,4 +300,30 @@ static inline int has_control_page(struct task_struct* t)
 	return tsk_rt(t)->ctrl_page != NULL;
 }
 
+
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+
+#define TS_SYSCALL_IN_START \
+	if (has_control_page(current)) { \
+		__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \
+	}
+
+#define TS_SYSCALL_IN_END \
+	if (has_control_page(current)) { \
+		unsigned long flags; \
+		uint64_t irqs; \
+		local_irq_save(flags); \
+		irqs = get_control_page(current)->irq_count - \
+			get_control_page(current)->irq_syscall_start; \
+		__TS_SYSCALL_IN_END(&irqs); \
+		local_irq_restore(flags); \
+	}
+
+#else
+
+#define TS_SYSCALL_IN_START
+#define TS_SYSCALL_IN_END
+
+#endif
+
 #endif
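Finally, a sketch of how a system-call backend might use the overhead-tracing hooks from the last hunk. This is not part of the patch; the syscall is made up, and the placement is an assumption: user space is expected to stamp ts_syscall_start in its control page right before trapping into the kernel, so both macros sit at the top of the handler.

asmlinkage long sys_litmus_example(void)
{
	TS_SYSCALL_IN_START;	/* pick up the entry timestamp left by user space */
	TS_SYSCALL_IN_END;	/* record interrupts observed since the syscall began */

	/* ... the actual system-call work would go here ... */

	return 0;
}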