Diffstat (limited to 'include/litmus/rt_param.h')
-rw-r--r--	include/litmus/rt_param.h	175
1 file changed, 175 insertions(+), 0 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..c599f848d1ed
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,175 @@
/*
 * Definition of the scheduler plugin interface.
 */
#ifndef _LINUX_RT_PARAM_H_
#define _LINUX_RT_PARAM_H_

/* Litmus time type. */
typedef unsigned long long lt_t;

static inline int lt_after(lt_t a, lt_t b)
{
	return ((long long) b) - ((long long) a) < 0;
}
#define lt_before(a, b) lt_after(b, a)

static inline int lt_after_eq(lt_t a, lt_t b)
{
	return ((long long) a) - ((long long) b) >= 0;
}
#define lt_before_eq(a, b) lt_after_eq(b, a)
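
/* Illustrative sketch only (not part of the original header): the helpers
 * above compare time instants via their signed difference, so they stay
 * correct even if the unsigned 64-bit clock wraps.  A release test could
 * look as follows; where 'now' comes from is outside the scope of this
 * header. */
static inline int example_job_released(lt_t now, lt_t release)
{
	/* true if the release instant lies at or before 'now' */
	return lt_after_eq(now, release);
}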

/* different types of clients */
typedef enum {
	RT_CLASS_HARD,		/* hard real-time: deadlines must be met */
	RT_CLASS_SOFT,		/* soft real-time: occasional tardiness tolerated */
	RT_CLASS_BEST_EFFORT	/* no timing guarantees */
} task_class_t;

struct rt_task {
	lt_t		exec_cost;	/* worst-case execution cost per job */
	lt_t		period;		/* job inter-arrival time */
	lt_t		phase;		/* release offset of the first job */
	unsigned int	cpu;		/* partition (for partitioned plugins) */
	task_class_t	cls;		/* hard, soft, or best-effort */
};
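
/* Illustrative sketch only (not part of the original interface): filling in
 * the user-controlled parameters for a task with a 10 ms budget every
 * 100 ms on CPU 0.  This assumes lt_t values are expressed in nanoseconds. */
static inline void example_init_rt_task(struct rt_task* tp)
{
	tp->exec_cost = 10000000ULL;	/* 10 ms */
	tp->period    = 100000000ULL;	/* 100 ms */
	tp->phase     = 0;		/* release immediately */
	tp->cpu       = 0;		/* only relevant for partitioned plugins */
	tp->cls       = RT_CLASS_SOFT;
}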

/* don't export internal data structures to user space (liblitmus) */
#ifdef __KERNEL__

struct _rt_domain;
struct heap_node;
struct release_heap;

struct rt_job {
	/* Time instant the job was or will be released. */
	lt_t	release;
	/* What is the current deadline? */
	lt_t	deadline;

	/* How much service has this job received so far? */
	lt_t	exec_time;

	/* Which job is this? This is used to let user space
	 * specify which job to wait for, which is important if jobs
	 * overrun. If we just call sys_sleep_next_period() then we
	 * will unintentionally miss jobs after an overrun.
	 *
	 * This sequence number is increased when a job is released.
	 */
	unsigned int	job_no;
};
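
/* Illustrative sketch only (not part of the original header): how the job
 * sequence number lets the kernel decide whether a task that asked to wait
 * for a particular job actually has to block.  If the requested job has
 * already been released (e.g. after an overrun), the caller should return
 * immediately rather than sleep past it. */
static inline int example_must_wait_for_job(struct rt_job* job, unsigned int wanted)
{
	/* block only while the requested job has not been released yet */
	return job->job_no < wanted;
}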


struct pfair_param;

/* RT task parameters for scheduling extensions.
 * These parameters are inherited during clone and therefore must
 * be explicitly set up before the task set is launched.
 */
struct rt_param {
	/* task state flags, see the RT_F_* constants below
	 * (e.g., is the task sleeping?) */
	unsigned int		flags:8;

	/* do we need to check for srp blocking? */
	unsigned int		srp_non_recurse:1;

	/* is the task present? (true if it can be scheduled) */
	unsigned int		present:1;

	/* user controlled parameters */
	struct rt_task		task_params;

	/* timing parameters */
	struct rt_job		job_params;

	/* Task representing the current "inherited" task
	 * priority, assigned by inherit_priority and
	 * return_priority in the scheduler plugins.
	 * May point to the task itself if priority inheritance does
	 * not result in an increased task priority.
	 */
	struct task_struct*	inh_task;

	/* Don't just dereference this pointer in kernel space!
	 * It might very well point to junk or nothing at all.
	 * NULL indicates that the task has not requested any non-preemptable
	 * section support.
	 * Not inherited upon fork.
	 */
	short*			np_flag;

	/* Plugins that don't need the np counter re-use its storage. */
	union {
		/* For the FMLP under PSN-EDF, it is required to make the task
		 * non-preemptive from kernel space. In order not to interfere with
		 * user space, this counter indicates the kernel-space np setting.
		 * kernel_np > 0 => task is non-preemptive
		 * (see the sketch after this struct)
		 */
		unsigned int	kernel_np;

		/* Used by GQ-EDF */
		unsigned int	last_cpu;
	};

	/* This field can be used by plugins to store where the task
	 * is currently scheduled. It is the responsibility of the
	 * plugin to avoid race conditions.
	 *
	 * This is used by GSN-EDF and PFAIR.
	 */
	volatile int		scheduled_on;

	/* Is the stack of the task currently in use? This is updated by
	 * the LITMUS core.
	 *
	 * Be careful to avoid deadlocks!
	 */
	volatile int		stack_in_use;

	/* This field can be used by plugins to store where the task
	 * is currently linked. It is the responsibility of the plugin
	 * to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	volatile int		linked_on;

	/* PFAIR/PD^2 state. Allocated on demand. */
	struct pfair_param*	pfair;

	/* Fields saved before the BE->RT transition.
	 */
	int			old_policy;
	int			old_prio;

	/* ready queue for this task */
	struct _rt_domain*	domain;

	/* heap element for this task
	 *
	 * Warning: Don't statically allocate this node. The heap
	 * implementation swaps these between tasks, thus after
	 * dequeuing from a heap you may end up with a different node
	 * than the one you had when enqueuing the task. For the same
	 * reason, don't obtain and store references to this node
	 * other than through this pointer (which is updated by the heap
	 * implementation).
	 */
	struct heap_node*	heap_node;
	struct release_heap*	rel_heap;

	/* Used by rt_domain to queue the task in the release list.
	 */
	struct list_head	list;
};
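
/* Illustrative sketch only (not part of the original header), referenced
 * from the kernel_np comment above: how a plugin might enter and leave a
 * kernel-initiated non-preemptive section by manipulating the counter.
 * Real plugin code may use different accessors and additional locking. */
static inline void example_enter_kernel_np(struct rt_param* p)
{
	p->kernel_np++;		/* > 0 means: do not preempt this task */
}

static inline void example_exit_kernel_np(struct rt_param* p)
{
	if (p->kernel_np)
		p->kernel_np--;	/* preemption allowed again once it reaches zero */
}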

/* Possible RT flags */
#define RT_F_RUNNING		0x00000000
#define RT_F_SLEEP		0x00000001
#define RT_F_EXIT_SEM		0x00000008
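
/* Illustrative sketch only (not part of the original header): the flags
 * above are kept in the 8-bit 'flags' field of struct rt_param.  Job
 * completion could be recorded roughly as follows; real code reaches the
 * field through the owning task_struct, which is omitted here. */
static inline void example_mark_job_complete(struct rt_param* p)
{
	p->flags = RT_F_SLEEP;	/* sleep until the next job release */
}

static inline int example_is_running(struct rt_param* p)
{
	return p->flags == RT_F_RUNNING;	/* RT_F_RUNNING is zero */
}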

#endif /* __KERNEL__ */

#endif /* _LINUX_RT_PARAM_H_ */