Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r--	include/litmus/litmus.h	292
1 files changed, 292 insertions, 0 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 000000000000..31ac72eddef7
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,292 @@
/*
 * Constant definitions related to
 * scheduling policy.
 */

#ifndef _LINUX_LITMUS_H_
#define _LINUX_LITMUS_H_

#include <litmus/debug_trace.h>

#ifdef CONFIG_RELEASE_MASTER
extern atomic_t release_master_cpu;
#endif

/* in_list - is a given list_head queued on some list?
 */
static inline int in_list(struct list_head* list)
{
	return !( /* case 1: deleted */
		 (list->next == LIST_POISON1 &&
		  list->prev == LIST_POISON2)
		 ||
		 /* case 2: initialized */
		 (list->next == list &&
		  list->prev == list)
		);
}
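
/* Illustrative note (editorial, not part of the original header): with
 * standard Linux list semantics, a node initialized via INIT_LIST_HEAD()
 * points to itself and a node removed via list_del() is poisoned with
 * LIST_POISON1/2, so for such a node
 *
 *	in_list(&node) == 0
 *
 * whereas after list_add(&node, &some_queue) it returns non-zero.
 */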

#define NO_CPU			0xffffffff

void litmus_fork(struct task_struct *tsk);
void litmus_exec(void);
/* clean up real-time state of a task */
void exit_litmus(struct task_struct *dead_tsk);

long litmus_admit_task(struct task_struct *tsk);
void litmus_exit_task(struct task_struct *tsk);

#define is_realtime(t)		((t)->policy == SCHED_LITMUS)
#define rt_transition_pending(t) \
	((t)->rt_param.transition_pending)

#define tsk_rt(t)		(&(t)->rt_param)

/* Realtime utility macros */
#define get_rt_flags(t)		(tsk_rt(t)->flags)
#define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
#define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
#define get_rt_period(t)	(tsk_rt(t)->task_params.period)
#define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
#define get_partition(t)	(tsk_rt(t)->task_params.cpu)
#define get_priority(t)		(tsk_rt(t)->task_params.priority)
#define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
#define get_release(t)		(tsk_rt(t)->job_params.release)
#define get_class(t)		(tsk_rt(t)->task_params.cls)

#define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
#define get_boost_start(t)	(tsk_rt(t)->boost_start_time)

static inline int budget_exhausted(struct task_struct* t)
{
	return get_exec_time(t) >= get_exec_cost(t);
}

static inline lt_t budget_remaining(struct task_struct* t)
{
	if (!budget_exhausted(t))
		return get_exec_cost(t) - get_exec_time(t);
	else
		/* avoid overflow */
		return 0;
}

#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)

#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
				      == PRECISE_ENFORCEMENT)
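
/* Usage sketch (illustrative; litmus_reschedule_local() is assumed here
 * for the sake of the example and is not declared in this header): a
 * plugin's tick handler might react to an exhausted, enforced budget with
 *
 *	if (budget_enforced(t) && budget_exhausted(t))
 *		litmus_reschedule_local();
 *
 * How exhaustion is handled is ultimately plugin-specific.
 */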

#define is_hrt(t)	\
	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
#define is_srt(t)	\
	(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
#define is_be(t)	\
	(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)

/* Our notion of time within LITMUS: kernel monotonic time. */
static inline lt_t litmus_clock(void)
{
	return ktime_to_ns(ktime_get());
}
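
/* Example (illustrative): an absolute release time for task t can be
 * computed in nanoseconds on this clock as
 *
 *	lt_t now     = litmus_clock();
 *	lt_t release = now + get_rt_phase(t);
 *
 * The lt_t timestamps used below (release, deadline) live on this
 * monotonic, nanosecond-resolution timeline.
 */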

/* A macro to convert from nanoseconds to ktime_t. */
#define ns_to_ktime(t)		ktime_add_ns(ktime_set(0, 0), t)

#define get_domain(t)		(tsk_rt(t)->domain)

/* Honor the flag in the preempt_count variable that is set
 * when scheduling is in progress.
 */
#define is_running(t)			\
	((t)->state == TASK_RUNNING ||	\
	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)

#define is_blocked(t)		\
	(!is_running(t))
#define is_released(t, now)	\
	(lt_before_eq(get_release(t), now))
#define is_tardy(t, now)	\
	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))

/* real-time comparison macros */
#define earlier_deadline(a, b) (lt_before(\
	(a)->rt_param.job_params.deadline,\
	(b)->rt_param.job_params.deadline))
#define earlier_release(a, b)  (lt_before(\
	(a)->rt_param.job_params.release,\
	(b)->rt_param.job_params.release))
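
/* Example (illustrative): an EDF-style priority check between two
 * real-time tasks a and b can be written as
 *
 *	if (earlier_deadline(a, b))
 *		... a has higher priority than b ...
 *
 * lt_before()/lt_before_eq() are assumed to be the usual strict and
 * non-strict "comes earlier" comparisons on lt_t timestamps.
 */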

void preempt_if_preemptable(struct task_struct* t, int on_cpu);

#ifdef CONFIG_LITMUS_LOCKING
void srp_ceiling_block(void);
#else
#define srp_ceiling_block() /* nothing */
#endif

#define bheap2task(hn) ((struct task_struct*) hn->value)

static inline struct control_page* get_control_page(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page;
}

static inline int has_control_page(struct task_struct* t)
{
	return tsk_rt(t)->ctrl_page != NULL;
}
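
/* Illustrative note (editorial): the control page is used below to
 * exchange scheduling state with user space and is not necessarily
 * present for every task, so callers should guard dereferences, e.g.
 *
 *	if (has_control_page(t))
 *		... get_control_page(t)->sched.np.flag ...
 */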

#ifdef CONFIG_NP_SECTION

static inline int is_kernel_np(struct task_struct *t)
{
	return tsk_rt(t)->kernel_np;
}

static inline int is_user_np(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	if (is_user_np(t)) {
		/* Set the flag that tells user space to call
		 * into the kernel at the end of a critical section. */
		if (likely(tsk_rt(t)->ctrl_page)) {
			TRACE_TASK(t, "setting delayed_preemption flag\n");
			tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
		}
	}
}

static inline void make_np(struct task_struct *t)
{
	tsk_rt(t)->kernel_np++;
}

/* Callers should check whether a preemption is necessary when this
 * function returns 0.
 */
static inline int take_np(struct task_struct *t)
{
	return --tsk_rt(t)->kernel_np;
}
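
/* Example (illustrative): kernel-side non-preemptive sections nest via
 * the kernel_np counter:
 *
 *	make_np(t);	// kernel_np: 0 -> 1, t is now non-preemptive
 *	make_np(t);	// kernel_np: 1 -> 2, nested section
 *	take_np(t);	// returns 1, still non-preemptive
 *	take_np(t);	// returns 0, check whether a preemption is due
 */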

/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
static inline int request_exit_np_atomic(struct task_struct *t)
{
	union np_flag old, new;
	int ok;

	if (tsk_rt(t)->ctrl_page) {
		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
		if (old.np.flag == 0) {
			/* no longer non-preemptive */
			return 0;
		} else if (old.np.preempt) {
			/* already set, nothing for us to do */
			TRACE_TASK(t, "not setting np.preempt flag again\n");
			return 1;
		} else {
			/* non-preemptive and flag not yet set */
			new.raw = old.raw;
			new.np.preempt = 1;
			/* if we get old back, then we atomically set the flag */
			ok = cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw,
				     old.raw, new.raw) == old.raw;
			/* If we raced with a concurrent change, then so be
			 * it. Deliver it by IPI. We don't want an unbounded
			 * retry loop here since tasks might exploit that to
			 * keep the kernel busy indefinitely. */
			TRACE_TASK(t, "request_exit_np => %d\n", ok);
			return ok;
		}
	} else
		return 0;
}

#else

static inline int is_kernel_np(struct task_struct* t)
{
	return 0;
}

static inline int is_user_np(struct task_struct* t)
{
	return 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
	BUG();
}

static inline int request_exit_np_atomic(struct task_struct *t)
{
	return 0;
}

#endif

static inline void clear_exit_np(struct task_struct *t)
{
	if (likely(tsk_rt(t)->ctrl_page))
		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
}

static inline int is_np(struct task_struct *t)
{
#ifdef CONFIG_SCHED_DEBUG_TRACE
	int kernel, user;
	kernel = is_kernel_np(t);
	user   = is_user_np(t);
	if (kernel || user)
		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
			   kernel, user);
	return kernel || user;
#else
	return unlikely(is_kernel_np(t) || is_user_np(t));
#endif
}

static inline int is_present(struct task_struct* t)
{
	return t && tsk_rt(t)->present;
}


/* make the unit explicit */
typedef unsigned long quanta_t;

enum round {
	FLOOR,
	CEIL
};


/* Tick period is used to convert ns-specified execution
 * costs and periods into tick-based equivalents.
 */
extern ktime_t tick_period;

static inline quanta_t time2quanta(lt_t time, enum round round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}
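
/* Worked example (illustrative): with a 1 ms quantum
 * (quantum_length == 1,000,000 ns) and time == 2,500,000 ns, do_div()
 * leaves time == 2 with a non-zero remainder, so
 *
 *	time2quanta(2500000, FLOOR) == 2
 *	time2quanta(2500000, CEIL)  == 3
 *
 * An exact multiple of the quantum rounds the same way in both modes.
 */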

/* By how much is a given CPU staggered behind CPU 0? */
u64 cpu_stagger_offset(int cpu);

#define TS_SYSCALL_IN_START						\
	if (has_control_page(current))					\
		__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start);

#endif