Diffstat (limited to 'include/litmus/rt_param.h')
-rw-r--r--	include/litmus/rt_param.h	283
 1 file changed, 283 insertions(+), 0 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 00000000000..70c09acbeb2
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,283 @@
/*
 * Definition of the scheduler plugin interface.
 */
#ifndef _LINUX_RT_PARAM_H_
#define _LINUX_RT_PARAM_H_

/* Litmus time type. */
typedef unsigned long long lt_t;

static inline int lt_after(lt_t a, lt_t b)
{
	return ((long long) b) - ((long long) a) < 0;
}
#define lt_before(a, b) lt_after(b, a)

static inline int lt_after_eq(lt_t a, lt_t b)
{
	return ((long long) a) - ((long long) b) >= 0;
}
#define lt_before_eq(a, b) lt_after_eq(b, a)
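
/* Illustrative sketch (not part of the original header): lt_t is unsigned,
 * so a naive `a > b` misorders time instants once the counter wraps. The
 * signed-subtraction trick above stays correct as long as the two instants
 * are less than 2^63 time units apart:
 *
 *	lt_t release = (lt_t) -10;	// shortly before wrap-around
 *	lt_t now     = 5;		// shortly after wrap-around
 *	lt_after(now, release);		// 1: "now" is logically later
 *	(now > release);		// 0: naive comparison is fooled
 */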

/* different types of clients */
typedef enum {
	RT_CLASS_HARD,
	RT_CLASS_SOFT,
	RT_CLASS_BEST_EFFORT
} task_class_t;

typedef enum {
	NO_ENFORCEMENT,      /* job may overrun unhindered */
	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
	PRECISE_ENFORCEMENT  /* budgets are enforced with hrtimers */
} budget_policy_t;

/* Release behaviors for jobs. PERIODIC and EARLY jobs
   must end by calling sys_complete_job() (or equivalent)
   to set up their next release and deadline. */
typedef enum {
	/* Jobs are released sporadically (provided job precedence
	   constraints are met). */
	SPORADIC,

	/* Jobs are released periodically (provided job precedence
	   constraints are met). */
	PERIODIC,

	/* Jobs are released immediately after meeting precedence
	   constraints. Beware: this can peg your CPUs if used in
	   the wrong applications. Only supported by EDF schedulers. */
	EARLY
} release_policy_t;

/* We use the interpretation "lower index == higher priority", as is common
 * in the fixed-priority schedulability analysis literature: a numerically
 * lower priority value implies higher scheduling priority, with priority 1
 * being the highest. Priority 0 is reserved for priority boosting.
 * LITMUS_MAX_PRIORITY denotes the size of the priority value range.
 */

#define LITMUS_MAX_PRIORITY     512
#define LITMUS_HIGHEST_PRIORITY   1
#define LITMUS_LOWEST_PRIORITY   (LITMUS_MAX_PRIORITY - 1)

/* Provide generic comparison macros for userspace,
 * in case we change this later. */
#define litmus_higher_fixed_prio(a, b)	(a < b)
#define litmus_lower_fixed_prio(a, b)	(a > b)
#define litmus_is_valid_fixed_prio(p)		\
	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
	 (p) <= LITMUS_LOWEST_PRIORITY)

struct rt_task {
	lt_t		exec_cost;	/* per-job execution budget (ns) */
	lt_t		period;		/* (minimum) job inter-arrival time (ns) */
	lt_t		relative_deadline; /* deadline relative to release (ns) */
	lt_t		phase;		/* release offset of the first job (ns) */
	unsigned int	cpu;		/* partition (partitioned plugins only) */
	unsigned int	priority;	/* fixed priority (see above) */
	task_class_t	cls;
	budget_policy_t  budget_policy;  /* ignored by pfair */
	release_policy_t release_policy;
};
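
/* Illustrative sketch (not part of the original header): parameters for a
 * periodic task with a 10 ms period and a 2 ms budget. All lt_t values are
 * in nanoseconds; an implicit deadline equals the period.
 *
 *	struct rt_task params = {
 *		.exec_cost         = 2000000,	// 2 ms
 *		.period            = 10000000,	// 10 ms
 *		.relative_deadline = 10000000,	// implicit deadline
 *		.phase             = 0,
 *		.cpu               = 0,		// ignored by global plugins
 *		.priority          = LITMUS_LOWEST_PRIORITY,
 *		.cls               = RT_CLASS_SOFT,
 *		.budget_policy     = NO_ENFORCEMENT,
 *		.release_policy    = PERIODIC,
 *	};
 */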

union np_flag {
	uint64_t raw;
	struct {
		/* Is the task currently in a non-preemptive section? */
		uint64_t flag:31;
		/* Should the task call into the scheduler? */
		uint64_t preempt:1;
	} np;
};
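
/* Illustrative sketch (an assumption about the intended protocol, not a
 * verbatim copy of the liblitmus implementation): userspace marks a
 * non-preemptive section by setting sched.np.flag in the control page; the
 * kernel defers preemptions and sets sched.np.preempt instead. On exit,
 * the task must check preempt and yield into the scheduler if it was set:
 *
 *	void np_enter(struct control_page *cp)
 *	{
 *		cp->sched.np.flag = 1;	// a memory fence is needed in practice
 *	}
 *
 *	void np_exit(struct control_page *cp)
 *	{
 *		cp->sched.np.flag = 0;
 *		if (cp->sched.np.preempt)
 *			sched_yield();	// let the deferred preemption happen
 *	}
 */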

/* The definition of the data that is shared between the kernel and real-time
 * tasks via a shared page (see litmus/ctrldev.c).
 *
 * WARNING: User space can write to this, so don't trust
 * the correctness of the fields!
 *
 * This serves two purposes: to enable efficient signaling
 * of non-preemptive sections (user->kernel) and
 * delayed preemptions (kernel->user), and to export
 * some real-time relevant statistics such as preemption and
 * migration data to user space. We can't use a device to export
 * statistics because we want to avoid system call overhead when
 * determining preemption/migration overheads.
 */
struct control_page {
	/* This flag is used by userspace to communicate non-preemptive
	 * sections. */
	volatile union np_flag sched;

	volatile uint64_t irq_count;	/* Incremented by the kernel each time
					 * an IRQ is handled. */

	/* Locking overhead tracing: userspace records here the time stamp
	 * and IRQ counter prior to starting the system call. */
	uint64_t ts_syscall_start;	/* Feather-Trace cycles */
	uint64_t irq_syscall_start;	/* Snapshot of irq_count when the
					 * syscall started. */

	/* to be extended */
};

/* Expected offsets within the control page. */

#define LITMUS_CP_OFFSET_SCHED		0
#define LITMUS_CP_OFFSET_IRQ_COUNT	8
#define LITMUS_CP_OFFSET_TS_SC_START	16
#define LITMUS_CP_OFFSET_IRQ_SC_START	24
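
/* Illustrative sketch (not part of the original header): these offsets form
 * part of the user/kernel ABI, so a consumer can pin them at compile time
 * with standard C, e.g.:
 *
 *	#include <stddef.h>
 *	_Static_assert(offsetof(struct control_page, sched)
 *		       == LITMUS_CP_OFFSET_SCHED, "ABI mismatch");
 *	_Static_assert(offsetof(struct control_page, irq_count)
 *		       == LITMUS_CP_OFFSET_IRQ_COUNT, "ABI mismatch");
 *	_Static_assert(offsetof(struct control_page, ts_syscall_start)
 *		       == LITMUS_CP_OFFSET_TS_SC_START, "ABI mismatch");
 *	_Static_assert(offsetof(struct control_page, irq_syscall_start)
 *		       == LITMUS_CP_OFFSET_IRQ_SC_START, "ABI mismatch");
 */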

/* don't export internal data structures to user space (liblitmus) */
#ifdef __KERNEL__

struct _rt_domain;
struct bheap_node;
struct release_heap;

struct rt_job {
	/* Time instant the job was or will be released. */
	lt_t	release;
	/* What is the current deadline? */
	lt_t	deadline;

	/* How much service has this job received so far? */
	lt_t	exec_time;

	/* By how much did the prior job miss its deadline?
	 * This differs from tardiness in that lateness may
	 * be negative (when a job finishes before its deadline).
	 */
	long long	lateness;

	/* Which job is this? This is used to let user space
	 * specify which job to wait for, which is important if jobs
	 * overrun. If we just call sys_sleep_next_period() then we
	 * will unintentionally miss jobs after an overrun.
	 *
	 * Increase this sequence number when a job is released.
	 */
	unsigned int	job_no;
};
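
/* Illustrative sketch (hypothetical userspace pattern, not a verbatim
 * liblitmus API): job_no lets a task resynchronize after an overrun by
 * waiting for a specific future job instead of just "the next period":
 *
 *	unsigned int next = current_job_no + 1;	// read via the job API
 *	wait_for_job_release(next);	// returns at once if already released
 *
 * A plain sys_sleep_next_period() after an overrun would silently skip
 * the releases that occurred while the previous job was still running.
 */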

struct pfair_param;

/* RT task parameters for scheduling extensions
 * These parameters are inherited during clone and therefore must
 * be explicitly set up before the task set is launched.
 */
struct rt_param {
	/* is the task sleeping? */
	unsigned int		flags:8;

	/* do we need to check for srp blocking? */
	unsigned int		srp_non_recurse:1;

	/* is the task present? (true if it can be scheduled) */
	unsigned int		present:1;

	/* has the task completed? */
	unsigned int		completed:1;

#ifdef CONFIG_LITMUS_LOCKING
	/* Is the task being priority-boosted by a locking protocol? */
	unsigned int		priority_boosted:1;
	/* If so, when did this start? */
	lt_t			boost_start_time;

	/* How many LITMUS^RT locks does the task currently hold/wait for? */
	unsigned int		num_locks_held;
	/* How many PCP/SRP locks does the task currently hold/wait for? */
	unsigned int		num_local_locks_held;
#endif

	/* user controlled parameters */
	struct rt_task		task_params;

	/* timing parameters */
	struct rt_job		job_params;

	/* Task representing the current "inherited" task
	 * priority, assigned by inherit_priority and
	 * return_priority in the scheduler plugins.
	 * Could point to self if PI does not result in
	 * an increased task priority.
	 */
	struct task_struct*	inh_task;

#ifdef CONFIG_NP_SECTION
	/* For the FMLP under PSN-EDF, it is required to make the task
	 * non-preemptive from kernel space. In order not to interfere with
	 * user space, this counter indicates the kernel space np setting.
	 * kernel_np > 0 => task is non-preemptive
	 */
	unsigned int	kernel_np;
#endif

	/* This field can be used by plugins to store where the task
	 * is currently scheduled. It is the responsibility of the
	 * plugin to avoid race conditions.
	 *
	 * This is used by GSN-EDF and PFAIR.
	 */
	volatile int		scheduled_on;

	/* Is the stack of the task currently in use? This is updated by
	 * the LITMUS core.
	 *
	 * Be careful to avoid deadlocks!
	 */
	volatile int		stack_in_use;

	/* This field can be used by plugins to store where the task
	 * is currently linked. It is the responsibility of the plugin
	 * to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	volatile int		linked_on;

	/* PFAIR/PD^2 state. Allocated on demand. */
	struct pfair_param*	pfair;

	/* Fields saved before BE->RT transition. */
	int old_policy;
	int old_prio;

	/* ready queue for this task */
	struct _rt_domain*	domain;

	/* heap element for this task
	 *
	 * Warning: Don't statically allocate this node. The heap
	 * implementation swaps these between tasks, thus after
	 * dequeuing from a heap you may end up with a different node
	 * than the one you had when enqueuing the task. For the same
	 * reason, don't obtain and store references to this node
	 * other than this pointer (which is updated by the heap
	 * implementation).
	 */
	struct bheap_node*	heap_node;
	struct release_heap*	rel_heap;

	/* Used by rt_domain to queue the task in its release list. */
	struct list_head	list;

	/* Pointer to the page shared between userspace and kernel. */
	struct control_page*	ctrl_page;

	/* Tardiness and execution-time statistics. */
	lt_t total_tardy;
	lt_t max_tardy;
	unsigned int missed;
	lt_t max_exec_time;
	lt_t tot_exec_time;
};

#endif	/* __KERNEL__ */

#endif	/* _LINUX_RT_PARAM_H_ */