path: root/include/litmus/rt_param.h
author    Glenn Elliott <gelliott@cs.unc.edu>    2012-09-11 22:42:51 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>    2012-09-11 22:42:51 -0400
commit    c1d1979c99ca397241da4e3d7e0cb77f7ec28240 (patch)
tree      2a988aae1ae7c08891543e844171cbcb4281a5bb /include/litmus/rt_param.h
parent    fd3aa01f176cf12b1625f4f46ba01f3340bb57ed (diff)
parent    55e04c94b925b0790c2ae0a79f16e939e9bb2846 (diff)
Merge branch 'wip-gpu-rtas12' into wip-slave-threads
Conflicts:
        include/litmus/unistd_32.h
        include/litmus/unistd_64.h
        litmus/litmus.c
Diffstat (limited to 'include/litmus/rt_param.h')
-rw-r--r--    include/litmus/rt_param.h    132
1 file changed, 130 insertions(+), 2 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 21430623a940..02b750a9570b 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -5,6 +5,8 @@
 #ifndef _LINUX_RT_PARAM_H_
 #define _LINUX_RT_PARAM_H_
 
+#include <litmus/fpmath.h>
+
 /* Litmus time type. */
 typedef unsigned long long lt_t;
 
@@ -30,9 +32,15 @@ typedef enum {
 typedef enum {
         NO_ENFORCEMENT,      /* job may overrun unhindered */
         QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
-        PRECISE_ENFORCEMENT  /* budgets are enforced with hrtimers */
+        PRECISE_ENFORCEMENT, /* budgets are enforced with hrtimers */
 } budget_policy_t;
 
+typedef enum {
+        NO_SIGNALS,      /* job receives no signals when it exhausts its budget */
+        QUANTUM_SIGNALS, /* budget signals are only sent on quantum boundaries */
+        PRECISE_SIGNALS, /* budget signals are triggered with hrtimers */
+} budget_signal_policy_t;
+
 /* We use the common priority interpretation "lower index == higher priority",
  * which is commonly used in fixed-priority schedulability analysis papers.
  * So, a numerically lower priority value implies higher scheduling priority,
@@ -62,6 +70,7 @@ struct rt_task {
         unsigned int    priority;
         task_class_t    cls;
         budget_policy_t budget_policy;  /* ignored by pfair */
+        budget_signal_policy_t budget_signal_policy; /* currently ignored by pfair */
 };
 
 union np_flag {
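Note (illustrative, not part of the patch): the new budget_signal_policy field is chosen per task in struct rt_task, next to the existing budget_policy. A minimal sketch of how a task's parameters might be filled in, assuming the pre-existing exec_cost, period, and cls members of struct rt_task; all values are placeholders.

#include <litmus/rt_param.h>

/* Illustrative only: every field other than budget_signal_policy predates
 * this patch, and the numeric values are placeholders (nanoseconds). */
static struct rt_task example_params = {
        .exec_cost            = 10000000ULL,          /* 10 ms budget per job  */
        .period               = 100000000ULL,         /* 100 ms period         */
        .cls                  = RT_CLASS_SOFT,
        .budget_policy        = PRECISE_ENFORCEMENT,  /* enforce with hrtimers */
        .budget_signal_policy = PRECISE_SIGNALS,      /* also signal overruns  */
};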
@@ -74,6 +83,19 @@ union np_flag {
         } np;
 };
 
+struct affinity_observer_args
+{
+        int lock_od;
+};
+
+struct gpu_affinity_observer_args
+{
+        struct affinity_observer_args obs;
+        int replica_to_gpu_offset;
+        int nr_simult_users;
+        int relaxed_rules;
+};
+
 /* The definition of the data that is shared between the kernel and real-time
  * tasks via a shared page (see litmus/ctrldev.c).
  *
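Note (illustrative, not part of the patch): gpu_affinity_observer_args simply embeds the generic affinity_observer_args and adds the GPU replica layout. A sketch of filling in the argument block, where lock_od stands for an already-opened lock object descriptor and the remaining values are placeholders:

#include <litmus/rt_param.h>

/* Illustrative only: all values are placeholders. */
static void setup_gpu_observer_args(struct gpu_affinity_observer_args *args,
                                    int lock_od)
{
        args->obs.lock_od           = lock_od; /* lock the observer is tied to */
        args->replica_to_gpu_offset = 0;       /* replica 0 maps to GPU 0      */
        args->nr_simult_users       = 1;       /* one concurrent user per GPU  */
        args->relaxed_rules         = 0;       /* strict affinity heuristics   */
}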
@@ -97,6 +119,9 @@ struct control_page {
 /* don't export internal data structures to user space (liblitmus) */
 #ifdef __KERNEL__
 
+#include <litmus/binheap.h>
+#include <linux/semaphore.h>
+
 struct _rt_domain;
 struct bheap_node;
 struct release_heap;
@@ -110,6 +135,12 @@ struct rt_job {
         /* How much service has this job received so far? */
         lt_t    exec_time;
 
+        /* By how much did the prior job miss its deadline?
+         * This value differs from tardiness in that lateness may
+         * be negative (when the job finishes before its deadline).
+         */
+        long long       lateness;
+
         /* Which job is this. This is used to let user space
          * specify which job to wait for, which is important if jobs
          * overrun. If we just call sys_sleep_next_period() then we
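Note (illustrative, not part of the patch): lateness is the signed difference between a job's completion time and its deadline, while tardiness is that value clamped at zero. A small sketch of the relationship, using hypothetical completion/deadline timestamps:

#include <litmus/rt_param.h>   /* lt_t */

/* Hypothetical helpers: lateness may be negative, tardiness may not. */
static long long job_lateness(lt_t completion, lt_t deadline)
{
        return (long long)completion - (long long)deadline;
}

static lt_t job_tardiness(lt_t completion, lt_t deadline)
{
        long long late = job_lateness(completion, deadline);
        return late > 0 ? (lt_t)late : 0;
}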
@@ -118,10 +149,54 @@ struct rt_job {
          * Increase this sequence number when a job is released.
          */
         unsigned int    job_no;
+
+        /* bits:
+         * 0th: Set if a budget exhaustion signal has already been sent for
+         *      the current job. */
+        unsigned long   flags;
 };
 
+#define RT_JOB_SIG_BUDGET_SENT  0
+
 struct pfair_param;
 
+enum klitirqd_sem_status
+{
+        NEED_TO_REACQUIRE,
+        REACQUIRING,
+        NOT_HELD,
+        HELD
+};
+
+typedef enum gpu_migration_dist
+{
+        // TODO: Make this variable against NR_NVIDIA_GPUS
+        MIG_LOCAL = 0,
+        MIG_NEAR  = 1,
+        MIG_MED   = 2,
+        MIG_FAR   = 3,  // 8 GPUs in a binary tree hierarchy
+        MIG_NONE  = 4,
+
+        MIG_LAST = MIG_NONE
+} gpu_migration_dist_t;
+
+typedef struct feedback_est {
+        fp_t est;
+        fp_t accum_err;
+} feedback_est_t;
+
+
+#define AVG_EST_WINDOW_SIZE 20
+
+typedef struct avg_est {
+        lt_t history[AVG_EST_WINDOW_SIZE];
+        uint16_t count;
+        uint16_t idx;
+        lt_t sum;
+        lt_t std;
+        lt_t avg;
+} avg_est_t;
+
 /* RT task parameters for scheduling extensions
  * These parameters are inherited during clone and therefore must
  * be explicitly set up before the task set is launched.
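Note (illustrative, not part of the patch): bit 0 of the new flags word (RT_JOB_SIG_BUDGET_SENT) lets an enforcement path deliver a budget-exhaustion signal at most once per job. A sketch of how that might look, assuming tsk_rt() is the existing rt_param accessor and job_params the embedded struct rt_job; send_budget_signal() is a hypothetical helper:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <litmus/litmus.h>     /* tsk_rt() accessor (pre-existing) */
#include <litmus/rt_param.h>

static void send_budget_signal(struct task_struct *t);  /* hypothetical */

/* Illustrative only: fire the budget signal once per job. */
static void maybe_signal_budget_exhaustion(struct task_struct *t)
{
        struct rt_job *job = &tsk_rt(t)->job_params;

        /* test_and_set_bit() returns the previous bit value, so the signal
         * is sent only the first time the budget is found exhausted. */
        if (!test_and_set_bit(RT_JOB_SIG_BUDGET_SENT, &job->flags))
                send_budget_signal(t);
}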
@@ -136,6 +211,50 @@ struct rt_param {
         /* is the task present? (true if it can be scheduled) */
         unsigned int            present:1;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+        /* proxy threads have minimum priority by default */
+        unsigned int            is_proxy_thread:1;
+
+        /* pointer to the klitirqd instance currently working on this
+         * task_struct's behalf.  only set by the task pointed
+         * to by klitirqd.
+         *
+         * ptr only valid if is_proxy_thread == 0
+         */
+        struct task_struct*     cur_klitirqd;
+
+        /* Used to implement mutual execution exclusion between
+         * job and klitirqd execution.  A job must always hold
+         * its klitirqd_sem to execute.  A klitirqd instance
+         * must hold the semaphore before executing on behalf
+         * of a job.
+         */
+        struct mutex            klitirqd_sem;
+
+        /* status of the held klitirqd_sem, even if the held klitirqd_sem is
+         * from another task (only proxy threads do this though).
+         */
+        atomic_t                klitirqd_sem_stat;
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+        /* number of top-half interrupts handled on behalf of current job */
+        atomic_t                nv_int_count;
+        long unsigned int       held_gpus;  // bitmap of held GPUs
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+        avg_est_t               gpu_migration_est[MIG_LAST+1];
+
+        gpu_migration_dist_t    gpu_migration;
+        int                     last_gpu;
+
+        lt_t    accum_gpu_time;
+        lt_t    gpu_time_stamp;
+
+        unsigned int            suspend_gpu_tracker_on_block:1;
+#endif
+#endif
+
 #ifdef CONFIG_LITMUS_LOCKING
         /* Is the task being priority-boosted by a locking protocol? */
         unsigned int            priority_boosted:1;
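Note (illustrative, not part of the patch): gpu_migration_est[] keeps one avg_est_t window per migration distance, so recent migration costs can be averaged per distance class. A sketch of a sliding-window update as such a structure might be maintained; avg_est_add_sample() is a hypothetical helper, the structure is assumed to start zeroed, and the std field is left untouched for brevity:

#include <litmus/rt_param.h>   /* avg_est_t, AVG_EST_WINDOW_SIZE, lt_t */

/* Illustrative only: record one observed migration cost in the window. */
static void avg_est_add_sample(avg_est_t *est, lt_t observed)
{
        /* overwrite the oldest sample once the window is full */
        est->sum -= est->history[est->idx];
        est->history[est->idx] = observed;
        est->sum += observed;
        est->idx = (est->idx + 1) % AVG_EST_WINDOW_SIZE;

        if (est->count < AVG_EST_WINDOW_SIZE)
                est->count++;

        est->avg = est->sum / est->count;
}

A caller would pick the window by distance, e.g. the MIG_NEAR entry of gpu_migration_est, before consulting est->avg.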
@@ -155,11 +274,20 @@ struct rt_param {
          * could point to self if PI does not result in
          * an increased task priority.
          */
         struct task_struct*     inh_task;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+        raw_spinlock_t          hp_blocked_tasks_lock;
+        struct binheap          hp_blocked_tasks;
+
+        /* pointer to the lock upon which the task is currently blocked */
+        struct litmus_lock*     blocked_lock;
+#endif
 
 
         struct task_struct*     hp_group;
         unsigned int            is_slave:1;
+        unsigned int            has_slaves:1;
 
 
 #ifdef CONFIG_NP_SECTION