author		Bjoern Brandenburg <bbb@mpi-sws.org>	2013-06-25 00:22:06 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2013-08-07 03:46:43 -0400
commit		16bd98f3efcbde0c48944e81a35270008edcd953 (patch)
tree		10917c613ab95662c0f71fd8b8d700c0ed9b84b3 /include
parent		25d4d1addba5f45d534682cc446e3157500d873e (diff)
Extend task_struct with rt_param
This patch adds the PCB extensions required for LITMUS^RT.
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h	5
-rw-r--r--	include/litmus/rt_param.h	281
2 files changed, 286 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 178a8d909f14..0e29a7a79c7e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -55,6 +55,8 @@ struct sched_param {
 
 #include <asm/processor.h>
 
+#include <litmus/rt_param.h>
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1365,6 +1367,9 @@ struct task_struct {
 	int nr_dirtied_pause;
 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
 
+	/* LITMUS RT parameters and state */
+	struct rt_param rt_param;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..a1fed7653377
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,281 @@
/*
 * Definition of the scheduler plugin interface.
 */
#ifndef _LINUX_RT_PARAM_H_
#define _LINUX_RT_PARAM_H_

/* Litmus time type. */
typedef unsigned long long lt_t;

static inline int lt_after(lt_t a, lt_t b)
{
	return ((long long) b) - ((long long) a) < 0;
}
#define lt_before(a, b) lt_after(b, a)

static inline int lt_after_eq(lt_t a, lt_t b)
{
	return ((long long) a) - ((long long) b) >= 0;
}
#define lt_before_eq(a, b) lt_after_eq(b, a)
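
These comparisons subtract in signed arithmetic rather than comparing the unsigned values directly, so they stay correct if lt_t timestamps ever wrap around, in the spirit of the kernel's time_after() helpers. A standalone illustration (not part of the patch):

#include <stdio.h>

typedef unsigned long long lt_t;

static inline int lt_after(lt_t a, lt_t b)
{
	return ((long long) b) - ((long long) a) < 0;
}

int main(void)
{
	lt_t before_wrap = ~0ULL - 10;	/* shortly before the counter overflows */
	lt_t after_wrap  = 5;		/* shortly after it wrapped */

	/* A naive (after_wrap > before_wrap) test would report 0;
	 * the signed-difference test correctly reports 1. */
	printf("%d\n", lt_after(after_wrap, before_wrap));
	return 0;
}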

/* different types of clients */
typedef enum {
	RT_CLASS_HARD,
	RT_CLASS_SOFT,
	RT_CLASS_BEST_EFFORT
} task_class_t;

typedef enum {
	NO_ENFORCEMENT,      /* job may overrun unhindered */
	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
	PRECISE_ENFORCEMENT  /* budgets are enforced with hrtimers */
} budget_policy_t;

/* Release behaviors for jobs. PERIODIC and EARLY jobs
 * must end by calling sys_complete_job() (or equivalent)
 * to set up their next release and deadline.
 */
typedef enum {
	/* Jobs are released sporadically (provided job precedence
	 * constraints are met).
	 */
	TASK_SPORADIC,

	/* Jobs are released periodically (provided job precedence
	 * constraints are met).
	 */
	TASK_PERIODIC,

	/* Jobs are released immediately after meeting precedence
	 * constraints. Beware this can peg your CPUs if used in
	 * the wrong applications. Only supported by EDF schedulers.
	 */
	TASK_EARLY
} release_policy_t;
/* We use the common priority interpretation "lower index == higher priority",
 * as is standard in fixed-priority schedulability analysis papers.
 * So, a numerically lower priority value implies higher scheduling priority,
 * with priority 1 being the highest priority. Priority 0 is reserved for
 * priority boosting. LITMUS_MAX_PRIORITY denotes the size of the priority
 * value range.
 */

#define LITMUS_MAX_PRIORITY	512
#define LITMUS_HIGHEST_PRIORITY	1
#define LITMUS_LOWEST_PRIORITY	(LITMUS_MAX_PRIORITY - 1)

/* Provide generic comparison macros for userspace,
 * in case we change this later. */
#define litmus_higher_fixed_prio(a, b)	((a) < (b))
#define litmus_lower_fixed_prio(a, b)	((a) > (b))
#define litmus_is_valid_fixed_prio(p) \
	((p) >= LITMUS_HIGHEST_PRIORITY && \
	 (p) <= LITMUS_LOWEST_PRIORITY)
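
Concretely, under this convention litmus_higher_fixed_prio(1, 2) is true, since priority 1 outranks priority 2. A small sanity check, assuming this header is included (illustrative only):

#include <assert.h>

static void check_prio_convention(void)
{
	assert(litmus_higher_fixed_prio(1, 2));	/* 1 outranks 2 */
	assert(litmus_lower_fixed_prio(2, 1));
	assert(litmus_is_valid_fixed_prio(LITMUS_HIGHEST_PRIORITY));
	assert(!litmus_is_valid_fixed_prio(0));	/* 0 is reserved for boosting */
	assert(!litmus_is_valid_fixed_prio(LITMUS_MAX_PRIORITY)); /* one past the range */
}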

struct rt_task {
	lt_t exec_cost;
	lt_t period;
	lt_t relative_deadline;
	lt_t phase;
	unsigned int cpu;
	unsigned int priority;
	task_class_t cls;
	budget_policy_t budget_policy;  /* ignored by pfair */
	release_policy_t release_policy;
};
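
For illustration, a user-space sketch of how these fields might be filled in for an implicit-deadline sporadic task. The helper name is hypothetical, the time unit is assumed to be nanoseconds, and this header is assumed to be included:

#include <string.h>

/* Hypothetical helper: a sporadic task with a 10 ms budget and 100 ms period. */
static void init_sporadic_params(struct rt_task *tp)
{
	memset(tp, 0, sizeof(*tp));
	tp->exec_cost         = 10000000ULL;	/* 10 ms, assuming lt_t is in ns */
	tp->period            = 100000000ULL;	/* 100 ms */
	tp->relative_deadline = tp->period;	/* implicit deadline */
	tp->phase             = 0;
	tp->cpu               = 0;		/* meaningful for partitioned plugins */
	tp->priority          = LITMUS_LOWEST_PRIORITY;
	tp->cls               = RT_CLASS_SOFT;
	tp->budget_policy     = PRECISE_ENFORCEMENT;
	tp->release_policy    = TASK_SPORADIC;
}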

union np_flag {
	uint64_t raw;
	struct {
		/* Is the task currently in a non-preemptive section? */
		uint64_t flag:31;
		/* Should the task call into the scheduler? */
		uint64_t preempt:1;
	} np;
};
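
The intended handshake, sketched below with hypothetical helper names (the real user-space library may differ): user space raises np.flag before a non-preemptive section; while the flag is raised the kernel sets np.preempt instead of preempting; on exit, user space checks np.preempt and yields into the scheduler if a preemption was deferred.

#include <sched.h>

/* Hypothetical user-space helpers operating on the shared flag word. */
static void np_enter(volatile union np_flag *f)
{
	f->np.flag = 1;			/* ask the kernel to defer preemptions */
	__sync_synchronize();		/* publish before entering the section */
}

static void np_exit(volatile union np_flag *f)
{
	f->np.flag = 0;			/* section over */
	__sync_synchronize();
	if (f->np.preempt)		/* did the kernel defer a preemption? */
		sched_yield();		/* call into the scheduler now */
}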

/* The definition of the data that is shared between the kernel and real-time
 * tasks via a shared page (see litmus/ctrldev.c).
 *
 * WARNING: User space can write to this, so don't trust
 * the correctness of the fields!
 *
 * This serves two purposes: to enable efficient signaling
 * of non-preemptive sections (user->kernel) and
 * delayed preemptions (kernel->user), and to export
 * some real-time relevant statistics such as preemption and
 * migration data to user space. We can't use a device to export
 * statistics because we want to avoid system-call overhead when
 * determining preemption/migration overheads.
 */
struct control_page {
	/* This flag is used by userspace to communicate non-preemptive
	 * sections. */
	volatile union np_flag sched;

	/* Incremented by the kernel each time an IRQ is handled. */
	volatile uint64_t irq_count;

	/* Locking overhead tracing: userspace records here the time stamp
	 * and IRQ counter prior to starting the system call. */
	uint64_t ts_syscall_start;  /* Feather-Trace cycles */
	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
				     * started. */

	/* to be extended */
};

/* Expected offsets within the control page. */

#define LITMUS_CP_OFFSET_SCHED		0
#define LITMUS_CP_OFFSET_IRQ_COUNT	8
#define LITMUS_CP_OFFSET_TS_SC_START	16
#define LITMUS_CP_OFFSET_IRQ_SC_START	24
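
Because user space depends on this exact layout, the kernel side can pin the offsets down at compile time. A minimal sketch (not part of this patch) using the kernel's BUILD_BUG_ON:

#include <linux/bug.h>
#include <linux/stddef.h>

/* Fail the build if control_page's layout drifts from the documented offsets. */
static inline void check_control_page_layout(void)
{
	BUILD_BUG_ON(offsetof(struct control_page, sched) != LITMUS_CP_OFFSET_SCHED);
	BUILD_BUG_ON(offsetof(struct control_page, irq_count) != LITMUS_CP_OFFSET_IRQ_COUNT);
	BUILD_BUG_ON(offsetof(struct control_page, ts_syscall_start) != LITMUS_CP_OFFSET_TS_SC_START);
	BUILD_BUG_ON(offsetof(struct control_page, irq_syscall_start) != LITMUS_CP_OFFSET_IRQ_SC_START);
}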

/* don't export internal data structures to user space (liblitmus) */
#ifdef __KERNEL__

struct _rt_domain;
struct bheap_node;
struct release_heap;
struct rt_job {
	/* Time instant the job was or will be released. */
	lt_t release;
	/* What is the current deadline? */
	lt_t deadline;

	/* How much service has this job received so far? */
	lt_t exec_time;

	/* By how much did the prior job miss its deadline?
	 * Differs from tardiness in that lateness may
	 * be negative (when a job finishes before its deadline).
	 */
	long long lateness;

	/* Which job is this? This is used to let user space
	 * specify which job to wait for, which is important if jobs
	 * overrun. If we just call sys_sleep_next_period() then we
	 * will unintentionally miss jobs after an overrun.
	 *
	 * Increase this sequence number when a job is released.
	 */
	unsigned int job_no;
};
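
Since lateness may be negative, tardiness can be recovered from it by clamping at zero; for illustration (not part of the patch):

/* Tardiness is non-negative lateness: zero whenever the job met its deadline. */
static inline long long job_tardiness(const struct rt_job *job)
{
	return job->lateness > 0 ? job->lateness : 0;
}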

struct pfair_param;

/* RT task parameters for scheduling extensions.
 * These parameters are inherited during clone and therefore must
 * be explicitly set up before the task set is launched.
 */
struct rt_param {
	/* Generic flags available for plugin-internal use. */
	unsigned int flags:8;

	/* do we need to check for srp blocking? */
	unsigned int srp_non_recurse:1;

	/* is the task present? (true if it can be scheduled) */
	unsigned int present:1;

	/* has the task completed? */
	unsigned int completed:1;

	/* prevent this task from being requeued on another processor (used to
	 * coordinate GSN-EDF, C-EDF, and sync.c) */
	unsigned int dont_requeue:1;

#ifdef CONFIG_LITMUS_LOCKING
	/* Is the task being priority-boosted by a locking protocol? */
	unsigned int priority_boosted:1;
	/* If so, when did the boosting start? */
	lt_t boost_start_time;

	/* How many LITMUS^RT locks does the task currently hold/wait for? */
	unsigned int num_locks_held;
	/* How many PCP/SRP locks does the task currently hold/wait for? */
	unsigned int num_local_locks_held;
#endif

	/* user controlled parameters */
	struct rt_task task_params;

	/* timing parameters */
	struct rt_job job_params;

	/* Task representing the current "inherited" task
	 * priority, assigned by inherit_priority and
	 * return_priority in the scheduler plugins.
	 * Could point to self if PI does not result in
	 * an increased task priority.
	 */
	struct task_struct* inh_task;

#ifdef CONFIG_NP_SECTION
	/* For the FMLP under PSN-EDF, it is required to make the task
	 * non-preemptive from kernel space. In order not to interfere with
	 * user space, this counter indicates the kernel-space np setting.
	 * kernel_np > 0 => task is non-preemptive
	 */
	unsigned int kernel_np;
#endif

	/* This field can be used by plugins to store where the task
	 * is currently scheduled. It is the responsibility of the
	 * plugin to avoid race conditions.
	 *
	 * This is used by GSN-EDF and PFAIR.
	 */
	volatile int scheduled_on;

	/* Is the stack of the task currently in use? This is updated by
	 * the LITMUS core.
	 *
	 * Be careful to avoid deadlocks!
	 */
	volatile int stack_in_use;

	/* This field can be used by plugins to store where the task
	 * is currently linked. It is the responsibility of the plugin
	 * to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	volatile int linked_on;

	/* PFAIR/PD^2 state. Allocated on demand. */
	struct pfair_param* pfair;

	/* Fields saved before BE->RT transition. */
	int old_policy;
	int old_prio;

	/* ready queue for this task */
	struct _rt_domain* domain;

	/* Heap element for this task.
	 *
	 * Warning: Don't statically allocate this node. The heap
	 * implementation swaps these between tasks, so after
	 * dequeuing from a heap you may end up with a different node
	 * than the one you had when enqueuing the task. For the same
	 * reason, don't obtain and store references to this node
	 * other than this pointer (which is updated by the heap
	 * implementation).
	 */
	struct bheap_node* heap_node;
	struct release_heap* rel_heap;

	/* Used by rt_domain to queue the task in its release list. */
	struct list_head list;

	/* Pointer to the page shared between userspace and kernel. */
	struct control_page * ctrl_page;
};

#endif /* __KERNEL__ */

#endif /* _LINUX_RT_PARAM_H_ */
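
To illustrate how inh_task is meant to be consumed (a sketch of the stated semantics, not code from this patch): a plugin resolving a task's effective priority follows inh_task when it is set and falls back to the task itself otherwise.

/* Sketch: the task whose parameters determine t's effective priority. */
static inline struct task_struct *effective_prio_holder(struct task_struct *t)
{
	return t->rt_param.inh_task ? t->rt_param.inh_task : t;
}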