Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r--  include/litmus/litmus.h  252
1 files changed, 252 insertions, 0 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 000000000000..62107e659c12
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,252 @@
/*
 * Constant definitions related to
 * scheduling policy.
 */

#ifndef _LINUX_LITMUS_H_
#define _LINUX_LITMUS_H_

#include <linux/jiffies.h>
#include <litmus/sched_trace.h>

extern atomic_t release_master_cpu;

extern atomic_t __log_seq_no;

#define TRACE(fmt, args...) \
	sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \
				raw_smp_processor_id(), ## args)

#define TRACE_TASK(t, fmt, args...) \
	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)

#define TRACE_CUR(fmt, args...) \
	TRACE_TASK(current, fmt, ## args)

#define TRACE_BUG_ON(cond) \
	do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \
			     "called from %p current=%s/%d state=%d " \
			     "flags=%x partition=%d cpu=%d rtflags=%d" \
			     " job=%u timeslice=%u\n", \
			     #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
			     current->pid, current->state, current->flags, \
			     get_partition(current), smp_processor_id(), get_rt_flags(current), \
			     current->rt_param.job_params.job_no, \
			     current->rt.time_slice \
			     ); } while (0)
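
/*
 * Illustrative usage sketch (not part of the original interface): the
 * tracing macros are meant to be called from scheduler/plugin code with
 * printk-style arguments, e.g.
 *
 *	TRACE("checking for preemptions on P%d\n", smp_processor_id());
 *	TRACE_TASK(t, "blocks, state=%ld\n", (long) t->state);
 *	TRACE_CUR("job completed\n");
 */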

/* in_list - is a given list_head queued on some list?
 */
static inline int in_list(struct list_head* list)
{
	return !( /* case 1: deleted */
		  (list->next == LIST_POISON1 &&
		   list->prev == LIST_POISON2)
		  ||
		  /* case 2: initialized */
		  (list->next == list &&
		   list->prev == list)
		);
}
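
/*
 * Illustrative sketch (not part of the original interface), assuming a
 * hypothetical `struct list_head *node` embedded in some queued object:
 *
 *	if (in_list(node))
 *		list_del(node);
 *
 * i.e., only unlink entries that are actually queued, not freshly
 * initialized or already poisoned by a previous list_del().
 */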

#define NO_CPU 0xffffffff

void litmus_fork(struct task_struct *tsk);
void litmus_exec(void);
/* clean up real-time state of a task */
void exit_litmus(struct task_struct *dead_tsk);

long litmus_admit_task(struct task_struct *tsk);
void litmus_exit_task(struct task_struct *tsk);

#define is_realtime(t) ((t)->policy == SCHED_LITMUS)
#define rt_transition_pending(t) \
	((t)->rt_param.transition_pending)

#define tsk_rt(t) (&(t)->rt_param)

/* Realtime utility macros */
#define get_rt_flags(t)		(tsk_rt(t)->flags)
#define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
#define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
#define get_rt_period(t)	(tsk_rt(t)->task_params.period)
#define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
#define get_partition(t)	(tsk_rt(t)->task_params.cpu)
#define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
#define get_release(t)		(tsk_rt(t)->job_params.release)
#define get_class(t)		(tsk_rt(t)->task_params.cls)

inline static int budget_exhausted(struct task_struct* t)
{
	return get_exec_time(t) >= get_exec_cost(t);
}
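
/*
 * Illustrative sketch (not part of the original interface): a plugin's
 * tick or completion path would typically compare consumed service
 * against the provisioned budget, e.g.
 *
 *	if (budget_exhausted(t))
 *		TRACE_TASK(t, "budget exhausted (%llu >= %llu)\n",
 *			   get_exec_time(t), get_exec_cost(t));
 */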


#define is_hrt(t) \
	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
#define is_srt(t) \
	(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
#define is_be(t) \
	(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)

/* Our notion of time within LITMUS: kernel monotonic time. */
static inline lt_t litmus_clock(void)
{
	return ktime_to_ns(ktime_get());
}

/* A macro to convert from nanoseconds to ktime_t. */
#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)

#define get_domain(t) (tsk_rt(t)->domain)

/* Honor the flag in the preempt_count variable that is set
 * when scheduling is in progress.
 */
#define is_running(t) \
	((t)->state == TASK_RUNNING || \
	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)

#define is_blocked(t) \
	(!is_running(t))
#define is_released(t, now) \
	(lt_before_eq(get_release(t), now))
#define is_tardy(t, now) \
	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))

/* real-time comparison macros */
#define earlier_deadline(a, b) (lt_before(\
	(a)->rt_param.job_params.deadline,\
	(b)->rt_param.job_params.deadline))
#define earlier_release(a, b)  (lt_before(\
	(a)->rt_param.job_params.release,\
	(b)->rt_param.job_params.release))
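
/*
 * Illustrative sketch (not part of the original interface): an EDF-style
 * plugin would typically combine the clock, release, and deadline helpers
 * along the lines of the hypothetical comparison below.
 */
static inline int example_edf_prefers(struct task_struct *a,
				      struct task_struct *b)
{
	/* prefer a released job with the earlier absolute deadline */
	return a && (!b ||
		     (is_released(a, litmus_clock()) &&
		      earlier_deadline(a, b)));
}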

void preempt_if_preemptable(struct task_struct* t, int on_cpu);

#ifdef CONFIG_SRP
void srp_ceiling_block(void);
#else
#define srp_ceiling_block() /* nothing */
#endif

#define bheap2task(hn) ((struct task_struct*) hn->value)

#ifdef CONFIG_NP_SECTION

static inline int is_kernel_np(struct task_struct *t)
{
	return tsk_rt(t)->kernel_np;
}

static inline int is_user_np(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	if (is_user_np(t)) {
		/* Set the flag that tells user space to call
		 * into the kernel at the end of a critical section. */
		if (likely(tsk_rt(t)->ctrl_page)) {
			TRACE_TASK(t, "setting delayed_preemption flag\n");
			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
		}
	}
}

static inline void clear_exit_np(struct task_struct *t)
{
	if (likely(tsk_rt(t)->ctrl_page))
		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
}

static inline void make_np(struct task_struct *t)
{
	tsk_rt(t)->kernel_np++;
}

/* Caller should check if preemption is necessary when
 * this function returns 0.
 */
static inline int take_np(struct task_struct *t)
{
	return --tsk_rt(t)->kernel_np;
}
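
/*
 * Illustrative sketch (not part of the original interface): kernel code
 * that must not be preempted brackets the critical section with
 * make_np()/take_np(), re-checking for a pending preemption once the
 * nesting count drops back to zero, e.g.
 *
 *	make_np(current);
 *	... critical section ...
 *	if (take_np(current) == 0)
 *		check_for_preemption();   (hypothetical plugin callback)
 */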

#else

static inline int is_kernel_np(struct task_struct* t)
{
	return 0;
}

static inline int is_user_np(struct task_struct* t)
{
	return 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
	BUG();
}

static inline void clear_exit_np(struct task_struct* t)
{
}

#endif

static inline int is_np(struct task_struct *t)
{
#ifdef CONFIG_SCHED_DEBUG_TRACE
	int kernel, user;
	kernel = is_kernel_np(t);
	user   = is_user_np(t);
	if (kernel || user)
		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
			   kernel, user);
	return kernel || user;
#else
	return unlikely(is_kernel_np(t) || is_user_np(t));
#endif
}
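
/*
 * Illustrative sketch (not part of the original interface): rather than
 * preempting a non-preemptive job outright, a plugin typically asks it to
 * yield at the end of its critical section:
 *
 *	if (preempt_needed && is_np(scheduled))
 *		request_exit_np(scheduled);
 *
 * where `preempt_needed` and `scheduled` are hypothetical plugin state.
 */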

static inline int is_present(struct task_struct* t)
{
	return t && tsk_rt(t)->present;
}


/* make the unit explicit */
typedef unsigned long quanta_t;

enum round {
	FLOOR,
	CEIL
};


/* Tick period is used to convert ns-specified execution
 * costs and periods into tick-based equivalents.
 */
extern ktime_t tick_period;

static inline quanta_t time2quanta(lt_t time, enum round round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}
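
/*
 * Worked example (not part of the original interface): with a 1 ms tick,
 * i.e. quantum_length = 1000000 ns, converting 2500000 ns yields
 * time2quanta(2500000, FLOOR) == 2 and time2quanta(2500000, CEIL) == 3,
 * since do_div() truncates and leaves a non-zero remainder of 500000 ns.
 */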

/* By how much is cpu staggered behind CPU 0? */
u64 cpu_stagger_offset(int cpu);

#endif