aboutsummaryrefslogtreecommitdiffstats
path: root/include/litmus/rt_param.h
diff options
context:
space:
mode:
authorJeremy Erickson <jerickso@cs.unc.edu>2012-08-30 21:01:47 -0400
committerJeremy Erickson <jerickso@cs.unc.edu>2012-08-30 21:01:47 -0400
commitb1e1fea67bca3796d5f9133a92c300ec4fa93a4f (patch)
tree5cc1336e1fe1d6f93b1067e73e43381dd20db690 /include/litmus/rt_param.h
parentf6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)
Bjoern's Dissertation Code with Priority Donationwip-splitting-omlp-jerickso
Diffstat (limited to 'include/litmus/rt_param.h')
-rw-r--r--include/litmus/rt_param.h228
1 files changed, 228 insertions, 0 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..a23ce1524051
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,228 @@
1/*
2 * Definition of the scheduler plugin interface.
3 *
4 */
5#ifndef _LINUX_RT_PARAM_H_
6#define _LINUX_RT_PARAM_H_
7
/* Litmus time type (monotonically increasing, may wrap around). */
typedef unsigned long long lt_t;

/* Wraparound-safe time comparisons.
 *
 * The difference is computed in unsigned arithmetic (well-defined
 * modulo-2^64 wraparound) and only then converted to a signed type,
 * mirroring the Linux kernel's time_after() idiom in
 * <linux/jiffies.h>.  Computing ((long long) b) - ((long long) a)
 * directly, as the previous version did, invokes signed-overflow
 * undefined behavior once the two time stamps are more than
 * 2^63 - 1 apart.
 */

/* Returns non-zero iff time 'a' comes strictly after time 'b'. */
static inline int lt_after(lt_t a, lt_t b)
{
	return ((long long) (b - a)) < 0;
}
/* Returns non-zero iff time 'a' comes strictly before time 'b'. */
#define lt_before(a, b) lt_after(b, a)

/* Returns non-zero iff time 'a' comes at or after time 'b'. */
static inline int lt_after_eq(lt_t a, lt_t b)
{
	return ((long long) (a - b)) >= 0;
}
/* Returns non-zero iff time 'a' comes at or before time 'b'. */
#define lt_before_eq(a, b) lt_after_eq(b, a)
22
/* Different types of clients; enumerator values follow declaration
 * order (RT_CLASS_HARD == 0). */
typedef enum {
	RT_CLASS_HARD,		/* hard real-time */
	RT_CLASS_SOFT,		/* soft real-time */
	RT_CLASS_BEST_EFFORT	/* no real-time guarantees */
} task_class_t;
29
/* How (and whether) execution-budget overruns are policed per job. */
typedef enum {
	NO_ENFORCEMENT,      /* job may overrun unhindered */
	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
	PRECISE_ENFORCEMENT  /* NOT IMPLEMENTED - enforced with hrtimers */
} budget_policy_t;
35
36#define LITMUS_MAX_PRIORITY 512
37
38struct rt_task {
39 lt_t exec_cost;
40 lt_t period;
41 lt_t phase;
42 unsigned int cpu;
43 unsigned int priority;
44 task_class_t cls;
45 budget_policy_t budget_policy; /* ignored by pfair */
46};
47
/* Non-preemptive-section flag word shared between kernel and user
 * space.  The 'raw' member lets both bitfields be read or written as
 * a single 32-bit word. */
union np_flag {
	uint32_t raw;
	struct {
		/* Is the task currently in a non-preemptive section? */
		uint32_t flag:31;
		/* Should the task call into the scheduler? */
		uint32_t preempt:1;
	} np;
};
57
/* The definition of the data that is shared between the kernel and real-time
 * tasks via a shared page (see litmus/ctrldev.c).
 *
 * WARNING: User space can write to this, so don't trust
 * the correctness of the fields!
 *
 * This serves two purposes: to enable efficient signaling
 * of non-preemptive sections (user->kernel) and
 * delayed preemptions (kernel->user), and to export
 * some real-time relevant statistics such as preemption and
 * migration data to user space. We can't use a device to export
 * statistics because we want to avoid system call overhead when
 * determining preemption/migration overheads.
 */
struct control_page {
	/* non-preemptive section / delayed preemption flags */
	volatile union np_flag sched;

	/* locking overhead tracing: time stamp prior to system call */
	uint64_t ts_syscall_start;  /* Feather-Trace cycles */

	/* to be extended */
};
80
81/* don't export internal data structures to user space (liblitmus) */
82#ifdef __KERNEL__
83
84struct _rt_domain;
85struct bheap_node;
86struct release_heap;
87
/* Per-job timing state, updated by the LITMUS core as jobs are
 * released and executed. */
struct rt_job {
	/* Time instant the job was or will be released. */
	lt_t release;
	/* What is the current deadline? */
	lt_t deadline;

	/* How much service has this job received so far? */
	lt_t exec_time;

	/* Which job is this. This is used to let user space
	 * specify which job to wait for, which is important if jobs
	 * overrun. If we just call sys_sleep_next_period() then we
	 * will unintentionally miss jobs after an overrun.
	 *
	 * Increase this sequence number when a job is released.
	 */
	unsigned int job_no;
};
106
107struct pfair_param;
108
/* RT task parameters for scheduling extensions.
 * These parameters are inherited during clone and therefore must
 * be explicitly set up before the task set is launched.
 */
struct rt_param {
	/* RT_F_* state flags (see the definitions at the end of this file) */
	unsigned int flags:8;

	/* do we need to check for srp blocking? */
	unsigned int srp_non_recurse:1;

	/* is the task present? (true if it can be scheduled) */
	unsigned int present:1;

#ifdef CONFIG_LITMUS_LOCKING
	/* Is the task being priority-boosted by a locking protocol? */
	unsigned int priority_boosted:1;
	/* If so, when did this start? */
	lt_t boost_start_time;
#endif

	/* user controlled parameters */
	struct rt_task task_params;

	/* timing parameters of the current job */
	struct rt_job job_params;

	/* Task representing the current "inherited" task
	 * priority, assigned by inherit_priority and
	 * return_priority in the scheduler plugins.
	 * Could point to self if priority inheritance does not result in
	 * an increased task priority.
	 */
	struct task_struct* inh_task;

#ifdef CONFIG_NP_SECTION
	/* For the FMLP under PSN-EDF, it is required to make the task
	 * non-preemptive from kernel space. In order not to interfere with
	 * user space, this counter indicates the kernel space np setting.
	 * kernel_np > 0 => task is non-preemptive
	 */
	unsigned int kernel_np;
#endif

	/* This field can be used by plugins to store where the task
	 * is currently scheduled. It is the responsibility of the
	 * plugin to avoid race conditions.
	 *
	 * This is used by GSN-EDF and PFAIR.
	 */
	volatile int scheduled_on;

	/* Is the stack of the task currently in use? This is updated by
	 * the LITMUS core.
	 *
	 * Be careful to avoid deadlocks!
	 */
	volatile int stack_in_use;

	/* This field can be used by plugins to store where the task
	 * is currently linked. It is the responsibility of the plugin
	 * to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	volatile int linked_on;

	/* PFAIR/PD^2 state. Allocated on demand. */
	struct pfair_param* pfair;

	/* Scheduling policy and priority saved before a best-effort ->
	 * real-time transition, so they can be restored afterwards.
	 */
	int old_policy;
	int old_prio;

	/* ready queue for this task */
	struct _rt_domain* domain;

	/* heap element for this task
	 *
	 * Warning: Don't statically allocate this node. The heap
	 * implementation swaps these between tasks, thus after
	 * dequeuing from a heap you may end up with a different node
	 * than the one you had when enqueuing the task. For the same
	 * reason, don't obtain and store references to this node
	 * other than this pointer (which is updated by the heap
	 * implementation).
	 */
	struct bheap_node* heap_node;
	/* node used by rt_domain to place this task in a release heap */
	struct release_heap* rel_heap;

#ifdef CONFIG_LITMUS_LOCKING
	/* task in heap of pending jobs -- used by C-EDF for priority donation */
	struct bheap_node* pending_node;
	/* is the job in a critical section or a wait queue? */
	unsigned int request_incomplete;
	/* is the job currently a donor? */
	unsigned int is_donor;
	/* is this job suspended, waiting to become eligible? */
	unsigned int waiting_eligible;

	/* NOTE(review): plugin-specific "pending" slot (likely a CPU or
	 * cluster index) -- exact semantics not visible in this header;
	 * confirm against the C-EDF priority-donation code. */
	int pending_on;
#endif

	/* Used by rt_domain to queue task in release list.
	 */
	struct list_head list;

	/* Pointer to the page shared between userspace and kernel. */
	struct control_page * ctrl_page;
};
220
/* Possible RT flags -- values stored in rt_param::flags.
 * NOTE(review): precise semantics of each flag are established by the
 * LITMUS core (not visible in this header); names suggest run state,
 * job-completion sleep, and semaphore-exit handling -- confirm there. */
#define RT_F_RUNNING 0x00000000
#define RT_F_SLEEP 0x00000001
#define RT_F_EXIT_SEM 0x00000008
225
226#endif
227
228#endif