diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-09-14 08:34:36 -0400 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-09-14 08:34:36 -0400 |
commit | 4ad6ba08f0dab67bbd89a26b27f1cc86e3c45c13 (patch) | |
tree | fd982c7a12f7a947278e05d0b126a015c24793f4 | |
parent | c1d1979c99ca397241da4e3d7e0cb77f7ec28240 (diff) |
checkpoint for aux_tasks. can still deadlock
-rw-r--r-- | include/linux/sched.h | 4 | ||||
-rw-r--r-- | include/litmus/aux_tasks.h | 33 | ||||
-rw-r--r-- | include/litmus/litmus.h | 1 | ||||
-rw-r--r-- | include/litmus/rt_param.h | 17 | ||||
-rw-r--r-- | include/litmus/sched_plugin.h | 7 | ||||
-rw-r--r-- | litmus/Makefile | 2 | ||||
-rw-r--r-- | litmus/aux_tasks.c | 387 | ||||
-rw-r--r-- | litmus/edf_common.c | 22 | ||||
-rw-r--r-- | litmus/litmus.c | 111 | ||||
-rw-r--r-- | litmus/nvidia_info.c | 48 | ||||
-rw-r--r-- | litmus/rt_domain.c | 13 | ||||
-rw-r--r-- | litmus/sched_cedf.c | 12 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 161 | ||||
-rw-r--r-- | litmus/sched_plugin.c | 16 |
14 files changed, 746 insertions, 88 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c990d13ae35..5d1c041be809 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1532,8 +1532,10 @@ struct task_struct { | |||
1532 | #endif | 1532 | #endif |
1533 | struct prop_local_single dirties; | 1533 | struct prop_local_single dirties; |
1534 | 1534 | ||
1535 | /* LITMUS RT parameters and state */ | 1535 | /*** LITMUS RT parameters and state ***/ |
1536 | struct rt_param rt_param; | 1536 | struct rt_param rt_param; |
1537 | struct aux_data aux_data; | ||
1538 | /*****/ | ||
1537 | 1539 | ||
1538 | /* references to PI semaphores, etc. */ | 1540 | /* references to PI semaphores, etc. */ |
1539 | struct od_table_entry *od_table; | 1541 | struct od_table_entry *od_table; |
diff --git a/include/litmus/aux_tasks.h b/include/litmus/aux_tasks.h new file mode 100644 index 000000000000..8e50ac85b082 --- /dev/null +++ b/include/litmus/aux_tasks.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef LITMUS_AUX_taskS | ||
2 | #define LITMUS_AUX_taskS | ||
3 | |||
4 | struct task_struct; | ||
5 | |||
6 | #define MAGIC_AUX_TASK_PERIOD ~((lt_t)0) | ||
7 | |||
8 | /* admit an aux task with default parameters */ | ||
9 | //int admit_aux_task(struct task_struct *t); | ||
10 | |||
11 | /* call on an aux task when it exits real-time */ | ||
12 | int exit_aux_task(struct task_struct *t); | ||
13 | |||
14 | /* call when an aux_owner becomes real-time */ | ||
15 | long enable_aux_task_owner(struct task_struct *t); | ||
16 | |||
17 | /* call when an aux_owner exits real-time */ | ||
18 | long disable_aux_task_owner(struct task_struct *t); | ||
19 | |||
20 | |||
21 | /* collectively make all aux tasks in the process of leader inherit from hp */ | ||
22 | //int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp); | ||
23 | |||
24 | /* collectively make all aux tasks in the process of leader stop inheriting from hp */ | ||
25 | //int aux_tasks_decrease_priority(struct task_struct *leader, struct task_struct *hp); | ||
26 | |||
27 | /* call when an aux_owner increases its priority */ | ||
28 | int aux_task_owner_increase_priority(struct task_struct *t); | ||
29 | |||
30 | /* call when an aux_owner decreases its priority */ | ||
31 | int aux_task_owner_decrease_priority(struct task_struct *t); | ||
32 | |||
33 | #endif \ No newline at end of file | ||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 1d70ab713571..f9829167294d 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -44,6 +44,7 @@ void litmus_exit_task(struct task_struct *tsk); | |||
44 | ((t)->rt_param.transition_pending) | 44 | ((t)->rt_param.transition_pending) |
45 | 45 | ||
46 | #define tsk_rt(t) (&(t)->rt_param) | 46 | #define tsk_rt(t) (&(t)->rt_param) |
47 | #define tsk_aux(t) (&(t)->aux_data) | ||
47 | 48 | ||
48 | /* Realtime utility macros */ | 49 | /* Realtime utility macros */ |
49 | #define get_rt_flags(t) (tsk_rt(t)->flags) | 50 | #define get_rt_flags(t) (tsk_rt(t)->flags) |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 02b750a9570b..2a6c70f1dd37 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -285,9 +285,13 @@ struct rt_param { | |||
285 | #endif | 285 | #endif |
286 | 286 | ||
287 | 287 | ||
288 | struct task_struct* hp_group; | 288 | #ifdef CONFIG_LITMUS_LOCKING |
289 | unsigned int is_slave:1; | 289 | unsigned int is_aux_task:1; |
290 | unsigned int has_slaves:1; | 290 | unsigned int has_aux_tasks:1; |
291 | |||
292 | struct list_head aux_task_node; | ||
293 | struct binheap_node aux_task_owner_node; | ||
294 | #endif | ||
291 | 295 | ||
292 | 296 | ||
293 | #ifdef CONFIG_NP_SECTION | 297 | #ifdef CONFIG_NP_SECTION |
@@ -354,6 +358,13 @@ struct rt_param { | |||
354 | struct control_page * ctrl_page; | 358 | struct control_page * ctrl_page; |
355 | }; | 359 | }; |
356 | 360 | ||
361 | struct aux_data | ||
362 | { | ||
363 | struct list_head aux_tasks; | ||
364 | struct binheap aux_task_owners; | ||
365 | unsigned int initialized:1; | ||
366 | }; | ||
367 | |||
357 | /* Possible RT flags */ | 368 | /* Possible RT flags */ |
358 | #define RT_F_RUNNING 0x00000000 | 369 | #define RT_F_RUNNING 0x00000000 |
359 | #define RT_F_SLEEP 0x00000001 | 370 | #define RT_F_SLEEP 0x00000001 |
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index 24a6858b4b0b..bd75e7c09a10 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h | |||
@@ -70,6 +70,10 @@ typedef long (*allocate_affinity_observer_t) ( | |||
70 | 70 | ||
71 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | 71 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); |
72 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | 72 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); |
73 | |||
74 | typedef int (*__increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
75 | typedef int (*__decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
76 | |||
73 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | 77 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, |
74 | raw_spinlock_t *to_unlock, unsigned long irqflags); | 78 | raw_spinlock_t *to_unlock, unsigned long irqflags); |
75 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | 79 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, |
@@ -146,6 +150,9 @@ struct sched_plugin { | |||
146 | allocate_lock_t allocate_lock; | 150 | allocate_lock_t allocate_lock; |
147 | increase_prio_t increase_prio; | 151 | increase_prio_t increase_prio; |
148 | decrease_prio_t decrease_prio; | 152 | decrease_prio_t decrease_prio; |
153 | |||
154 | __increase_prio_t __increase_prio; | ||
155 | __decrease_prio_t __decrease_prio; | ||
149 | #endif | 156 | #endif |
150 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 157 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
151 | nested_increase_prio_t nested_increase_prio; | 158 | nested_increase_prio_t nested_increase_prio; |
diff --git a/litmus/Makefile b/litmus/Makefile index 59c018560ee9..f2dd7be7ae4a 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -31,7 +31,7 @@ obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | |||
31 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | 31 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o |
32 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | 32 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o |
33 | 33 | ||
34 | obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o | 34 | obj-$(CONFIG_LITMUS_LOCKING) += aux_tasks.o kfmlp_lock.o |
35 | obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o | 35 | obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o |
36 | obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o | 36 | obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o |
37 | obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o | 37 | obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o |
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c new file mode 100644 index 000000000000..c197a95fc3a1 --- /dev/null +++ b/litmus/aux_tasks.c | |||
@@ -0,0 +1,387 @@ | |||
1 | #ifdef CONFIG_LITMUS_LOCKING | ||
2 | |||
3 | #include <litmus/sched_plugin.h> | ||
4 | #include <litmus/trace.h> | ||
5 | #include <litmus/litmus.h> | ||
6 | #include <litmus/rt_param.h> | ||
7 | #include <litmus/aux_tasks.h> | ||
8 | |||
9 | static int admit_aux_task(struct task_struct *t) | ||
10 | { | ||
11 | int retval = 0; | ||
12 | struct task_struct *leader = t->group_leader; | ||
13 | |||
14 | struct rt_task tp = { | ||
15 | .exec_cost = 0, | ||
16 | .period = MAGIC_AUX_TASK_PERIOD, | ||
17 | .relative_deadline = MAGIC_AUX_TASK_PERIOD, | ||
18 | .phase = 0, | ||
19 | .cpu = task_cpu(leader), /* take CPU of group leader */ | ||
20 | .budget_policy = NO_ENFORCEMENT, | ||
21 | .cls = RT_CLASS_BEST_EFFORT | ||
22 | }; | ||
23 | |||
24 | struct sched_param param = { .sched_priority = 0}; | ||
25 | |||
26 | tsk_rt(t)->task_params = tp; | ||
27 | retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, ¶m); | ||
28 | |||
29 | return retval; | ||
30 | } | ||
31 | |||
32 | int exit_aux_task(struct task_struct *t) | ||
33 | { | ||
34 | int retval = 0; | ||
35 | struct task_struct *leader = t->group_leader; | ||
36 | |||
37 | BUG_ON(!tsk_rt(t)->is_aux_task); | ||
38 | |||
39 | TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
40 | |||
41 | list_del(&tsk_rt(t)->aux_task_node); | ||
42 | |||
43 | tsk_rt(t)->is_aux_task = 0; | ||
44 | |||
45 | if (tsk_rt(t)->inh_task) { | ||
46 | litmus->decrease_prio(t, NULL); | ||
47 | } | ||
48 | |||
49 | return retval; | ||
50 | } | ||
51 | |||
52 | static int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp) | ||
53 | { | ||
54 | int retval = 0; | ||
55 | struct list_head *pos; | ||
56 | |||
57 | TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); | ||
58 | |||
59 | list_for_each(pos, &tsk_aux(leader)->aux_tasks) { | ||
60 | struct task_struct *aux = | ||
61 | container_of(list_entry(pos, struct rt_param, aux_task_node), | ||
62 | struct task_struct, rt_param); | ||
63 | |||
64 | if (!is_realtime(aux)) { | ||
65 | #if 0 | ||
66 | /* currently can't do this here because of scheduler deadlock on itself */ | ||
67 | TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); | ||
68 | retval = admit_aux_task(aux); | ||
69 | |||
70 | if (retval != 0) { | ||
71 | TRACE_CUR("failed to admit aux task %s/%d\n", aux->comm, aux->pid); | ||
72 | goto out; | ||
73 | } | ||
74 | #endif | ||
75 | TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); | ||
76 | } | ||
77 | |||
78 | // aux tasks don't touch rt locks, so no nested call needed. | ||
79 | TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); | ||
80 | retval = litmus->__increase_prio(aux, hp); | ||
81 | } | ||
82 | |||
83 | //out: | ||
84 | return retval; | ||
85 | } | ||
86 | |||
87 | static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_struct *hp) | ||
88 | { | ||
89 | int retval = 0; | ||
90 | struct list_head *pos; | ||
91 | |||
92 | TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); | ||
93 | |||
94 | list_for_each(pos, &tsk_aux(leader)->aux_tasks) { | ||
95 | struct task_struct *aux = | ||
96 | container_of(list_entry(pos, struct rt_param, aux_task_node), | ||
97 | struct task_struct, rt_param); | ||
98 | |||
99 | if (!is_realtime(aux)) { | ||
100 | #if 0 | ||
101 | /* currently can't do this here because of scheduler deadlock on itself */ | ||
102 | TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); | ||
103 | retval = admit_aux_task(aux); | ||
104 | |||
105 | if (retval != 0) | ||
106 | goto out; | ||
107 | |||
108 | if (hp) { | ||
109 | // aux tasks don't touch rt locks, so no nested call needed. | ||
110 | TRACE_CUR("decreasing (actually increasing) %s/%d.\n", aux->comm, aux->pid); | ||
111 | retval = litmus->__increase_prio(aux, hp); | ||
112 | } | ||
113 | #endif | ||
114 | |||
115 | TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); | ||
116 | } | ||
117 | else { | ||
118 | TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid); | ||
119 | retval = litmus->__decrease_prio(aux, hp); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | //out: | ||
124 | return retval; | ||
125 | } | ||
126 | |||
127 | int aux_task_owner_increase_priority(struct task_struct *t) | ||
128 | { | ||
129 | int retval = 0; | ||
130 | struct task_struct *leader; | ||
131 | struct task_struct *hp = NULL; | ||
132 | |||
133 | BUG_ON(!tsk_rt(t)->has_aux_tasks); | ||
134 | BUG_ON(!is_realtime(t)); | ||
135 | BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); | ||
136 | |||
137 | leader = t->group_leader; | ||
138 | |||
139 | TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
140 | |||
141 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
142 | struct task_struct, rt_param); | ||
143 | |||
144 | if (hp == t) { | ||
145 | goto out; // already hp, nothing to do. | ||
146 | } | ||
147 | |||
148 | binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | ||
149 | |||
150 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
151 | struct task_struct, rt_param); | ||
152 | |||
153 | if (hp == t) { | ||
154 | TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
155 | retval = aux_tasks_increase_priority(leader, | ||
156 | (tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp); | ||
157 | } | ||
158 | |||
159 | out: | ||
160 | return retval; | ||
161 | } | ||
162 | |||
163 | int aux_task_owner_decrease_priority(struct task_struct *t) | ||
164 | { | ||
165 | int retval = 0; | ||
166 | struct task_struct *leader; | ||
167 | struct task_struct *hp = NULL; | ||
168 | struct task_struct *new_hp = NULL; | ||
169 | |||
170 | BUG_ON(!tsk_rt(t)->has_aux_tasks); | ||
171 | BUG_ON(!is_realtime(t)); | ||
172 | BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); | ||
173 | |||
174 | leader = t->group_leader; | ||
175 | |||
176 | TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
177 | |||
178 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
179 | struct task_struct, rt_param); | ||
180 | binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | ||
181 | binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, | ||
182 | struct rt_param, aux_task_owner_node); | ||
183 | new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
184 | struct task_struct, rt_param); | ||
185 | |||
186 | if (hp == t && new_hp != t) { | ||
187 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
188 | retval = aux_tasks_decrease_priority(leader, | ||
189 | (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp); | ||
190 | } | ||
191 | |||
192 | return retval; | ||
193 | } | ||
194 | |||
195 | |||
196 | |||
197 | long enable_aux_task_owner(struct task_struct *t) | ||
198 | { | ||
199 | long retval = 0; | ||
200 | struct task_struct *leader = t->group_leader; | ||
201 | struct task_struct *hp; | ||
202 | |||
203 | if (!tsk_rt(t)->has_aux_tasks) { | ||
204 | TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); | ||
205 | return -1; | ||
206 | } | ||
207 | |||
208 | BUG_ON(!is_realtime(t)); | ||
209 | |||
210 | if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { | ||
211 | TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid); | ||
212 | goto out; | ||
213 | } | ||
214 | |||
215 | binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, | ||
216 | struct rt_param, aux_task_owner_node); | ||
217 | |||
218 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
219 | struct task_struct, rt_param); | ||
220 | if (hp == t) { | ||
221 | /* we're the new hp */ | ||
222 | TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
223 | |||
224 | retval = aux_tasks_increase_priority(leader, | ||
225 | (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | ||
226 | } | ||
227 | |||
228 | |||
229 | out: | ||
230 | return retval; | ||
231 | } | ||
232 | |||
233 | long disable_aux_task_owner(struct task_struct *t) | ||
234 | { | ||
235 | long retval = 0; | ||
236 | struct task_struct *leader = t->group_leader; | ||
237 | struct task_struct *hp; | ||
238 | struct task_struct *new_hp = NULL; | ||
239 | |||
240 | if (!tsk_rt(t)->has_aux_tasks) { | ||
241 | TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); | ||
242 | return -1; | ||
243 | } | ||
244 | |||
245 | BUG_ON(!is_realtime(t)); | ||
246 | |||
247 | if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { | ||
248 | TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid); | ||
249 | goto out; | ||
250 | } | ||
251 | |||
252 | TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
253 | |||
254 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
255 | struct task_struct, rt_param); | ||
256 | binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | ||
257 | |||
258 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { | ||
259 | new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
260 | struct task_struct, rt_param); | ||
261 | } | ||
262 | |||
263 | if (hp == t && new_hp != t) { | ||
264 | struct task_struct *to_inh = NULL; | ||
265 | |||
266 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | ||
267 | |||
268 | if (new_hp) { | ||
269 | to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp; | ||
270 | } | ||
271 | |||
272 | retval = aux_tasks_decrease_priority(leader, to_inh); | ||
273 | } | ||
274 | |||
275 | out: | ||
276 | return retval; | ||
277 | } | ||
278 | |||
279 | |||
280 | static int aux_task_owner_max_priority_order(struct binheap_node *a, | ||
281 | struct binheap_node *b) | ||
282 | { | ||
283 | struct task_struct *d_a = container_of(binheap_entry(a, struct rt_param, aux_task_owner_node), | ||
284 | struct task_struct, rt_param); | ||
285 | struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node), | ||
286 | struct task_struct, rt_param); | ||
287 | |||
288 | BUG_ON(!d_a); | ||
289 | BUG_ON(!d_b); | ||
290 | |||
291 | return litmus->compare(d_a, d_b); | ||
292 | } | ||
293 | |||
294 | |||
295 | asmlinkage long sys_slave_non_rt_threads(void) | ||
296 | { | ||
297 | long retval = 0; | ||
298 | struct task_struct *leader; | ||
299 | struct task_struct *t; | ||
300 | |||
301 | read_lock_irq(&tasklist_lock); | ||
302 | |||
303 | leader = current->group_leader; | ||
304 | |||
305 | #if 0 | ||
306 | t = leader; | ||
307 | do { | ||
308 | if (tsk_rt(t)->has_aux_tasks || tsk_rt(t)->is_aux_task) { | ||
309 | printk("slave_non_rt_tasks may only be called once per process.\n"); | ||
310 | retval = -EINVAL; | ||
311 | goto out_unlock; | ||
312 | } | ||
313 | } while (t != leader); | ||
314 | #endif | ||
315 | |||
316 | if (!tsk_aux(leader)->initialized) { | ||
317 | INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks); | ||
318 | INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order); | ||
319 | tsk_aux(leader)->initialized = 1; | ||
320 | } | ||
321 | |||
322 | t = leader; | ||
323 | do { | ||
324 | /* doesn't hurt to initialize them both */ | ||
325 | INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node); | ||
326 | INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node); | ||
327 | |||
328 | TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n", | ||
329 | leader->comm, leader->pid, t->comm, t->pid, | ||
330 | tsk_rt(t)->task_params.period); | ||
331 | |||
332 | /* inspect heap_node to see if it is an rt task */ | ||
333 | if (tsk_rt(t)->task_params.period == 0 || | ||
334 | tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) { | ||
335 | if (!tsk_rt(t)->is_aux_task) { | ||
336 | TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); | ||
337 | /* hasn't been admitted into rt. make it an aux. */ | ||
338 | tsk_rt(t)->is_aux_task = 1; | ||
339 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); | ||
340 | |||
341 | (void)admit_aux_task(t); | ||
342 | } | ||
343 | else { | ||
344 | TRACE_CUR("AUX task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid); | ||
345 | } | ||
346 | } | ||
347 | else { | ||
348 | if (!tsk_rt(t)->has_aux_tasks) { | ||
349 | TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); | ||
350 | tsk_rt(t)->has_aux_tasks = 1; | ||
351 | if (is_realtime(t)) { | ||
352 | binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, | ||
353 | struct rt_param, aux_task_owner_node); | ||
354 | } | ||
355 | } | ||
356 | else { | ||
357 | TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid); | ||
358 | } | ||
359 | } | ||
360 | |||
361 | t = next_thread(t); | ||
362 | } while(t != leader); | ||
363 | |||
364 | |||
365 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { | ||
366 | struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | ||
367 | struct task_struct, rt_param); | ||
368 | TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid); | ||
369 | retval = aux_tasks_increase_priority(leader, | ||
370 | (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | ||
371 | } | ||
372 | |||
373 | //out_unlock: | ||
374 | read_unlock_irq(&tasklist_lock); | ||
375 | |||
376 | return retval; | ||
377 | } | ||
378 | |||
379 | #else | ||
380 | |||
381 | asmlinkage long sys_slave_non_rt_tasks(void) | ||
382 | { | ||
383 | printk("Unsupported. Recompile with CONFIG_LITMUS_LOCKING.\n"); | ||
384 | return -EINVAL; | ||
385 | } | ||
386 | |||
387 | #endif | ||
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 39ce1816ee04..9b439299e5fc 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -74,6 +74,23 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
74 | } | 74 | } |
75 | 75 | ||
76 | #ifdef CONFIG_LITMUS_LOCKING | 76 | #ifdef CONFIG_LITMUS_LOCKING |
77 | /* aux threads with no inheritance have lowest priority; however, do a PID | ||
78 | * tie break if both threads are aux threads with no inheritance. | ||
79 | */ | ||
80 | if (unlikely(first->rt_param.is_aux_task && !first->rt_param.inh_task)) { | ||
81 | if (second->rt_param.is_aux_task && !second->rt_param.inh_task) { | ||
82 | /* pid break */ | ||
83 | if (first->pid < second->pid) { | ||
84 | return 1; | ||
85 | } | ||
86 | } | ||
87 | return 0; | ||
88 | } | ||
89 | if (unlikely(second->rt_param.is_aux_task && !second->rt_param.inh_task)) { | ||
90 | /* no need for pid break -- case already tested */ | ||
91 | return 1; | ||
92 | } | ||
93 | |||
77 | /* Check for EFFECTIVE priorities. Change task | 94 | /* Check for EFFECTIVE priorities. Change task |
78 | * used for comparison in such a case. | 95 | * used for comparison in such a case. |
79 | */ | 96 | */ |
@@ -191,7 +208,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
191 | /* Both tasks have the same inherited priority. | 208 | /* Both tasks have the same inherited priority. |
192 | * Likely in a bug-condition. | 209 | * Likely in a bug-condition. |
193 | */ | 210 | */ |
194 | if (likely(first->pid < second->pid)) { | 211 | if (first->pid < second->pid) { |
195 | return 1; | 212 | return 1; |
196 | } | 213 | } |
197 | else if (first->pid == second->pid) { | 214 | else if (first->pid == second->pid) { |
@@ -205,6 +222,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
205 | 222 | ||
206 | /* The task with the inherited priority wins. */ | 223 | /* The task with the inherited priority wins. */ |
207 | if (!second->rt_param.inh_task) { | 224 | if (!second->rt_param.inh_task) { |
225 | /* | ||
226 | * common with aux tasks. | ||
208 | TRACE_CUR("unusual comparison: " | 227 | TRACE_CUR("unusual comparison: " |
209 | "first = %s/%d first_task = %s/%d " | 228 | "first = %s/%d first_task = %s/%d " |
210 | "second = %s/%d second_task = %s/%d\n", | 229 | "second = %s/%d second_task = %s/%d\n", |
@@ -214,6 +233,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
214 | second->comm, second->pid, | 233 | second->comm, second->pid, |
215 | (second->rt_param.inh_task) ? second->rt_param.inh_task->comm : "(nil)", | 234 | (second->rt_param.inh_task) ? second->rt_param.inh_task->comm : "(nil)", |
216 | (second->rt_param.inh_task) ? second->rt_param.inh_task->pid : 0); | 235 | (second->rt_param.inh_task) ? second->rt_param.inh_task->pid : 0); |
236 | */ | ||
217 | return 1; | 237 | return 1; |
218 | } | 238 | } |
219 | } | 239 | } |
diff --git a/litmus/litmus.c b/litmus/litmus.c index 83e8ef3f42af..1b4182ac3337 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -25,6 +25,10 @@ | |||
25 | #include <litmus/nvidia_info.h> | 25 | #include <litmus/nvidia_info.h> |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #ifdef CONFIG_LITMUS_LOCKING | ||
29 | #include <litmus/aux_tasks.h> | ||
30 | #endif | ||
31 | |||
28 | /* Number of RT tasks that exist in the system */ | 32 | /* Number of RT tasks that exist in the system */ |
29 | atomic_t rt_task_count = ATOMIC_INIT(0); | 33 | atomic_t rt_task_count = ATOMIC_INIT(0); |
30 | static DEFINE_RAW_SPINLOCK(task_transition_lock); | 34 | static DEFINE_RAW_SPINLOCK(task_transition_lock); |
@@ -327,60 +331,6 @@ asmlinkage long sys_null_call(cycles_t __user *ts) | |||
327 | return ret; | 331 | return ret; |
328 | } | 332 | } |
329 | 333 | ||
330 | |||
331 | long __litmus_admit_task(struct task_struct* tsk); | ||
332 | |||
333 | asmlinkage long sys_slave_non_rt_threads(void) | ||
334 | { | ||
335 | long retval = 0; | ||
336 | struct task_struct *leader = current->group_leader; | ||
337 | struct task_struct *t; | ||
338 | struct task_struct *hp = NULL; | ||
339 | |||
340 | read_lock_irq(&tasklist_lock); | ||
341 | |||
342 | t = leader; | ||
343 | do { | ||
344 | TRACE_CUR("threads in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); | ||
345 | |||
346 | if (tsk_rt(t)->heap_node == NULL) { | ||
347 | retval = __litmus_admit_task(t); | ||
348 | |||
349 | if (retval != 0) break; | ||
350 | |||
351 | /* hasn't been admitted into rt. make it a slave. */ | ||
352 | tsk_rt(t)->slave = 1; | ||
353 | } | ||
354 | else { | ||
355 | tsk_rt(t)->has_slaves = 1; | ||
356 | |||
357 | if (is_realtime(t) && litmus->compare(t, hp)) { | ||
358 | hp = t; | ||
359 | } | ||
360 | } | ||
361 | |||
362 | t = next_thread(t); | ||
363 | } while(t != leader); | ||
364 | |||
365 | if (hp) { | ||
366 | TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid); | ||
367 | |||
368 | /* set up inheritance */ | ||
369 | leader->hp_group = hp; | ||
370 | |||
371 | t = leader; | ||
372 | do { | ||
373 | if (tsk_rt(t)->slave) { | ||
374 | litmus->increase_prio(t); | ||
375 | } | ||
376 | } while(t != leader); | ||
377 | } | ||
378 | |||
379 | read_unlock_irq(&tasklist_lock); | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | 334 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) |
385 | void init_gpu_affinity_state(struct task_struct* p) | 335 | void init_gpu_affinity_state(struct task_struct* p) |
386 | { | 336 | { |
@@ -412,11 +362,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
412 | { | 362 | { |
413 | struct rt_task user_config = {}; | 363 | struct rt_task user_config = {}; |
414 | void* ctrl_page = NULL; | 364 | void* ctrl_page = NULL; |
415 | 365 | ||
416 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 366 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
417 | binheap_order_t prio_order = NULL; | 367 | binheap_order_t prio_order = NULL; |
418 | #endif | 368 | #endif |
419 | 369 | ||
370 | TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore); | ||
371 | |||
420 | if (restore) { | 372 | if (restore) { |
421 | /* Safe user-space provided configuration data. | 373 | /* Safe user-space provided configuration data. |
422 | * and allocated page. */ | 374 | * and allocated page. */ |
@@ -428,10 +380,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
428 | prio_order = p->rt_param.hp_blocked_tasks.compare; | 380 | prio_order = p->rt_param.hp_blocked_tasks.compare; |
429 | #endif | 381 | #endif |
430 | 382 | ||
383 | #ifdef CONFIG_LITMUS_LOCKING | ||
431 | /* We probably should not be inheriting any task's priority | 384 | /* We probably should not be inheriting any task's priority |
432 | * at this point in time. | 385 | * at this point in time. |
433 | */ | 386 | */ |
434 | WARN_ON(p->rt_param.inh_task); | 387 | WARN_ON(p->rt_param.inh_task); |
388 | #endif | ||
435 | 389 | ||
436 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 390 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
437 | WARN_ON(p->rt_param.blocked_lock); | 391 | WARN_ON(p->rt_param.blocked_lock); |
@@ -459,6 +413,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
459 | /* Cleanup everything else. */ | 413 | /* Cleanup everything else. */ |
460 | memset(&p->rt_param, 0, sizeof(p->rt_param)); | 414 | memset(&p->rt_param, 0, sizeof(p->rt_param)); |
461 | 415 | ||
416 | #ifdef CONFIG_LITMUS_LOCKING | ||
417 | /* also clear out the aux_data. the !restore case is only called on | ||
418 | * fork (initial thread creation). */ | ||
419 | if (!restore) | ||
420 | memset(&p->aux_data, 0, sizeof(p->aux_data)); | ||
421 | #endif | ||
422 | |||
462 | /* Restore preserved fields. */ | 423 | /* Restore preserved fields. */ |
463 | if (restore) { | 424 | if (restore) { |
464 | p->rt_param.task_params = user_config; | 425 | p->rt_param.task_params = user_config; |
@@ -475,7 +436,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
475 | #endif | 436 | #endif |
476 | } | 437 | } |
477 | 438 | ||
439 | |||
440 | #ifdef CONFIG_LITMUS_LOCKING | ||
441 | long __litmus_admit_task(struct task_struct* tsk, int clear_aux) | ||
442 | #else | ||
478 | long __litmus_admit_task(struct task_struct* tsk) | 443 | long __litmus_admit_task(struct task_struct* tsk) |
444 | #endif | ||
479 | { | 445 | { |
480 | long retval = 0; | 446 | long retval = 0; |
481 | unsigned long flags; | 447 | unsigned long flags; |
@@ -520,6 +486,14 @@ long __litmus_admit_task(struct task_struct* tsk) | |||
520 | atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); | 486 | atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); |
521 | #endif | 487 | #endif |
522 | 488 | ||
489 | #ifdef CONFIG_LITMUS_LOCKING | ||
490 | /* turns out our aux thread isn't really an aux thread. */ | ||
491 | if (clear_aux && tsk_rt(tsk)->is_aux_task) { | ||
492 | exit_aux_task(tsk); | ||
493 | tsk_rt(tsk)->has_aux_tasks = 1; | ||
494 | } | ||
495 | #endif | ||
496 | |||
523 | retval = litmus->admit_task(tsk); | 497 | retval = litmus->admit_task(tsk); |
524 | 498 | ||
525 | if (!retval) { | 499 | if (!retval) { |
@@ -537,8 +511,7 @@ out_unlock: | |||
537 | long litmus_admit_task(struct task_struct* tsk) | 511 | long litmus_admit_task(struct task_struct* tsk) |
538 | { | 512 | { |
539 | long retval = 0; | 513 | long retval = 0; |
540 | unsigned long flags; | 514 | |
541 | |||
542 | BUG_ON(is_realtime(tsk)); | 515 | BUG_ON(is_realtime(tsk)); |
543 | 516 | ||
544 | if (get_rt_relative_deadline(tsk) == 0 || | 517 | if (get_rt_relative_deadline(tsk) == 0 || |
@@ -560,8 +533,12 @@ long litmus_admit_task(struct task_struct* tsk) | |||
560 | goto out; | 533 | goto out; |
561 | } | 534 | } |
562 | 535 | ||
536 | #ifdef CONFIG_LITMUS_LOCKING | ||
537 | retval = __litmus_admit_task(tsk, (tsk_rt(tsk)->task_params.period != MAGIC_AUX_TASK_PERIOD)); | ||
538 | #else | ||
563 | retval = __litmus_admit_task(tsk); | 539 | retval = __litmus_admit_task(tsk); |
564 | 540 | #endif | |
541 | |||
565 | out: | 542 | out: |
566 | return retval; | 543 | return retval; |
567 | } | 544 | } |
@@ -574,7 +551,7 @@ void litmus_exit_task(struct task_struct* tsk) | |||
574 | litmus->task_exit(tsk); | 551 | litmus->task_exit(tsk); |
575 | 552 | ||
576 | BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); | 553 | BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); |
577 | bheap_node_free(tsk_rt(tsk)->heap_node); | 554 | bheap_node_free(tsk_rt(tsk)->heap_node); |
578 | release_heap_free(tsk_rt(tsk)->rel_heap); | 555 | release_heap_free(tsk_rt(tsk)->rel_heap); |
579 | 556 | ||
580 | atomic_dec(&rt_task_count); | 557 | atomic_dec(&rt_task_count); |
@@ -647,14 +624,22 @@ out: | |||
647 | */ | 624 | */ |
648 | void litmus_fork(struct task_struct* p) | 625 | void litmus_fork(struct task_struct* p) |
649 | { | 626 | { |
627 | reinit_litmus_state(p, 0); | ||
628 | |||
650 | if (is_realtime(p)) { | 629 | if (is_realtime(p)) { |
630 | TRACE_TASK(p, "fork, is real-time\n"); | ||
651 | /* clean out any litmus related state, don't preserve anything */ | 631 | /* clean out any litmus related state, don't preserve anything */ |
652 | reinit_litmus_state(p, 0); | 632 | //reinit_litmus_state(p, 0); |
653 | /* Don't let the child be a real-time task. */ | 633 | /* Don't let the child be a real-time task. */ |
654 | p->sched_reset_on_fork = 1; | 634 | p->sched_reset_on_fork = 1; |
655 | } else | 635 | } else { |
656 | /* non-rt tasks might have ctrl_page set */ | 636 | /* non-rt tasks might have ctrl_page set */ |
657 | tsk_rt(p)->ctrl_page = NULL; | 637 | tsk_rt(p)->ctrl_page = NULL; |
638 | |||
639 | /* still don't inherit any parental parameters */ | ||
640 | //memset(&p->rt_param, 0, sizeof(p->rt_param)); | ||
641 | //memset(&p->aux_data, 0, sizeof(p->aux_data)); | ||
642 | } | ||
658 | 643 | ||
659 | /* od tables are never inherited across a fork */ | 644 | /* od tables are never inherited across a fork */ |
660 | p->od_table = NULL; | 645 | p->od_table = NULL; |
@@ -751,6 +736,10 @@ static int __init _init_litmus(void) | |||
751 | init_topology(); | 736 | init_topology(); |
752 | #endif | 737 | #endif |
753 | 738 | ||
739 | #ifdef CONFIG_LITMUS_NVIDIA | ||
740 | //init_nvidia_info(); | ||
741 | #endif | ||
742 | |||
754 | return 0; | 743 | return 0; |
755 | } | 744 | } |
756 | 745 | ||
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c index 4b86a50d3bd1..b6ead58802f6 100644 --- a/litmus/nvidia_info.c +++ b/litmus/nvidia_info.c | |||
@@ -244,9 +244,56 @@ void dump_nvidia_info(const struct tasklet_struct *t) | |||
244 | #endif | 244 | #endif |
245 | } | 245 | } |
246 | 246 | ||
247 | |||
248 | |||
247 | static struct module* nvidia_mod = NULL; | 249 | static struct module* nvidia_mod = NULL; |
250 | |||
251 | |||
252 | #if 0 | ||
253 | static int nvidia_ready_module_notify(struct notifier_block *self, | ||
254 | unsigned long val, void *data) | ||
255 | { | ||
256 | mutex_lock(&module_mutex); | ||
257 | nvidia_mod = find_module("nvidia"); | ||
258 | mutex_unlock(&module_mutex); | ||
259 | |||
260 | if(nvidia_mod != NULL) | ||
261 | { | ||
262 | TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, | ||
263 | (void*)(nvidia_mod->module_core), | ||
264 | (void*)(nvidia_mod->module_core) + nvidia_mod->core_size); | ||
265 | init_nv_device_reg(); | ||
266 | return(0); | ||
267 | } | ||
268 | else | ||
269 | { | ||
270 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); | ||
271 | } | ||
272 | } | ||
273 | |||
274 | static int nvidia_going_module_notify(struct notifier_block *self, | ||
275 | unsigned long val, void *data) | ||
276 | { | ||
277 | nvidia_mod = NULL; | ||
278 | mb(); | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static struct notifier_block nvidia_ready = { | ||
284 | .notifier_call = nvidia_ready_module_notify, | ||
285 | .priority = 1, | ||
286 | }; | ||
287 | |||
288 | static struct notifier_block nvidia_going = { | ||
289 | .notifier_call = nvidia_going_module_notify, | ||
290 | .priority = 1, | ||
291 | }; | ||
292 | #endif | ||
293 | |||
248 | int init_nvidia_info(void) | 294 | int init_nvidia_info(void) |
249 | { | 295 | { |
296 | #if 1 | ||
250 | mutex_lock(&module_mutex); | 297 | mutex_lock(&module_mutex); |
251 | nvidia_mod = find_module("nvidia"); | 298 | nvidia_mod = find_module("nvidia"); |
252 | mutex_unlock(&module_mutex); | 299 | mutex_unlock(&module_mutex); |
@@ -263,6 +310,7 @@ int init_nvidia_info(void) | |||
263 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); | 310 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); |
264 | return(-1); | 311 | return(-1); |
265 | } | 312 | } |
313 | #endif | ||
266 | } | 314 | } |
267 | 315 | ||
268 | void shutdown_nvidia_info(void) | 316 | void shutdown_nvidia_info(void) |
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index d0b796611bea..d4f030728d3c 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
@@ -300,10 +300,15 @@ void rt_domain_init(rt_domain_t *rt, | |||
300 | */ | 300 | */ |
301 | void __add_ready(rt_domain_t* rt, struct task_struct *new) | 301 | void __add_ready(rt_domain_t* rt, struct task_struct *new) |
302 | { | 302 | { |
303 | TRACE("rt: adding %s/%d (%llu, %llu, %llu) rel=%llu " | 303 | TRACE("rt: adding %s/%d (%llu, %llu, %llu) " |
304 | "to ready queue at %llu\n", | 304 | "[inh_task: %s/%d (%llu, %llu %llu)] " |
305 | new->comm, new->pid, | 305 | "rel=%llu to ready queue at %llu\n", |
306 | get_exec_cost(new), get_rt_period(new), get_rt_relative_deadline(new), | 306 | new->comm, new->pid, get_exec_cost(new), get_rt_period(new), get_rt_relative_deadline(new), |
307 | (tsk_rt(new)->inh_task) ? tsk_rt(new)->inh_task->comm : "(nil)", | ||
308 | (tsk_rt(new)->inh_task) ? tsk_rt(new)->inh_task->pid : 0, | ||
309 | (tsk_rt(new)->inh_task) ? get_exec_cost(tsk_rt(new)->inh_task) : 0, | ||
310 | (tsk_rt(new)->inh_task) ? get_rt_period(tsk_rt(new)->inh_task) : 0, | ||
311 | (tsk_rt(new)->inh_task) ? get_rt_relative_deadline(tsk_rt(new)->inh_task) : 0, | ||
307 | get_release(new), litmus_clock()); | 312 | get_release(new), litmus_clock()); |
308 | 313 | ||
309 | BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node)); | 314 | BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node)); |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index d98de4579394..f030f027b486 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -1086,9 +1086,10 @@ static long cedf_admit_task(struct task_struct* tsk) | |||
1086 | 1086 | ||
1087 | 1087 | ||
1088 | /* called with IRQs off */ | 1088 | /* called with IRQs off */ |
1089 | static void __increase_priority_inheritance(struct task_struct* t, | 1089 | static int __increase_priority_inheritance(struct task_struct* t, |
1090 | struct task_struct* prio_inh) | 1090 | struct task_struct* prio_inh) |
1091 | { | 1091 | { |
1092 | int success = 1; | ||
1092 | int linked_on; | 1093 | int linked_on; |
1093 | int check_preempt = 0; | 1094 | int check_preempt = 0; |
1094 | 1095 | ||
@@ -1166,8 +1167,10 @@ static void __increase_priority_inheritance(struct task_struct* t, | |||
1166 | (prio_inh) ? prio_inh->comm : "nil", | 1167 | (prio_inh) ? prio_inh->comm : "nil", |
1167 | (prio_inh) ? prio_inh->pid : -1); | 1168 | (prio_inh) ? prio_inh->pid : -1); |
1168 | WARN_ON(!prio_inh); | 1169 | WARN_ON(!prio_inh); |
1170 | success = 0; | ||
1169 | } | 1171 | } |
1170 | #endif | 1172 | #endif |
1173 | return success; | ||
1171 | } | 1174 | } |
1172 | 1175 | ||
1173 | /* called with IRQs off */ | 1176 | /* called with IRQs off */ |
@@ -1204,9 +1207,10 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1204 | } | 1207 | } |
1205 | 1208 | ||
1206 | /* called with IRQs off */ | 1209 | /* called with IRQs off */ |
1207 | static void __decrease_priority_inheritance(struct task_struct* t, | 1210 | static int __decrease_priority_inheritance(struct task_struct* t, |
1208 | struct task_struct* prio_inh) | 1211 | struct task_struct* prio_inh) |
1209 | { | 1212 | { |
1213 | int success = 1; | ||
1210 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1214 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1211 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 1215 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
1212 | #endif | 1216 | #endif |
@@ -1254,8 +1258,10 @@ static void __decrease_priority_inheritance(struct task_struct* t, | |||
1254 | effective_priority(t)->comm, effective_priority(t)->pid, | 1258 | effective_priority(t)->comm, effective_priority(t)->pid, |
1255 | (prio_inh) ? prio_inh->comm : "nil", | 1259 | (prio_inh) ? prio_inh->comm : "nil", |
1256 | (prio_inh) ? prio_inh->pid : -1); | 1260 | (prio_inh) ? prio_inh->pid : -1); |
1261 | success = 0; | ||
1257 | } | 1262 | } |
1258 | #endif | 1263 | #endif |
1264 | return success; | ||
1259 | } | 1265 | } |
1260 | 1266 | ||
1261 | static void decrease_priority_inheritance(struct task_struct* t, | 1267 | static void decrease_priority_inheritance(struct task_struct* t, |
@@ -1812,6 +1818,8 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | |||
1812 | .allocate_lock = cedf_allocate_lock, | 1818 | .allocate_lock = cedf_allocate_lock, |
1813 | .increase_prio = increase_priority_inheritance, | 1819 | .increase_prio = increase_priority_inheritance, |
1814 | .decrease_prio = decrease_priority_inheritance, | 1820 | .decrease_prio = decrease_priority_inheritance, |
1821 | .__increase_prio = __increase_priority_inheritance, | ||
1822 | .__decrease_prio = __decrease_priority_inheritance, | ||
1815 | #endif | 1823 | #endif |
1816 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1824 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1817 | .nested_increase_prio = nested_increase_priority_inheritance, | 1825 | .nested_increase_prio = nested_increase_priority_inheritance, |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 83b2f04b1532..5b8ca6698423 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #ifdef CONFIG_LITMUS_LOCKING | 30 | #ifdef CONFIG_LITMUS_LOCKING |
31 | #include <litmus/kfmlp_lock.h> | 31 | #include <litmus/kfmlp_lock.h> |
32 | #include <litmus/aux_tasks.h> | ||
32 | #endif | 33 | #endif |
33 | 34 | ||
34 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 35 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
@@ -295,11 +296,37 @@ static noinline void requeue(struct task_struct* task) | |||
295 | /* sanity check before insertion */ | 296 | /* sanity check before insertion */ |
296 | BUG_ON(is_queued(task)); | 297 | BUG_ON(is_queued(task)); |
297 | 298 | ||
298 | if (is_released(task, litmus_clock())) | 299 | if (is_released(task, litmus_clock())) { |
299 | __add_ready(&gsnedf, task); | 300 | |
301 | if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) { | ||
302 | /* aux_task probably transitioned to real-time while it was blocked */ | ||
303 | TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid); | ||
304 | } | ||
305 | else { | ||
306 | __add_ready(&gsnedf, task); | ||
307 | |||
308 | #if 0 | ||
309 | if (tsk_rt(task)->has_aux_tasks) { | ||
310 | |||
311 | TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); | ||
312 | /* allow it's prio inheritance to act on aux threads */ | ||
313 | enable_aux_task_owner(task); | ||
314 | } | ||
315 | #endif | ||
316 | } | ||
317 | } | ||
300 | else { | 318 | else { |
301 | /* it has got to wait */ | 319 | /* it has got to wait */ |
302 | add_release(&gsnedf, task); | 320 | add_release(&gsnedf, task); |
321 | |||
322 | #if 0 | ||
323 | if (tsk_rt(task)->has_aux_tasks) { | ||
324 | |||
325 | TRACE_CUR("%s/%d is waiting for release and has aux tasks.\n", task->comm, task->pid); | ||
326 | /* prevent prio inheritance from acting while it's not ready */ | ||
327 | disable_aux_task_owner(task); | ||
328 | } | ||
329 | #endif | ||
303 | } | 330 | } |
304 | } | 331 | } |
305 | 332 | ||
@@ -366,10 +393,45 @@ static noinline void gsnedf_job_arrival(struct task_struct* task) | |||
366 | static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | 393 | static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) |
367 | { | 394 | { |
368 | unsigned long flags; | 395 | unsigned long flags; |
396 | //struct bheap_node* node; | ||
369 | 397 | ||
370 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 398 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
371 | 399 | ||
400 | #if 0 | ||
401 | node = tasks->head; | ||
402 | while(node) { | ||
403 | struct task_struct *task = bheap2task(node); | ||
404 | |||
405 | if (tsk_rt(task)->has_aux_tasks) { | ||
406 | |||
407 | TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); | ||
408 | |||
409 | /* allow its prio inheritance to act on aux threads */ | ||
410 | enable_aux_task_owner(task); | ||
411 | } | ||
412 | |||
413 | /* pre-order sub-tree traversal */ | ||
414 | if (node->child) { | ||
415 | /* go down */ | ||
416 | node = node->child; | ||
417 | } | ||
418 | else if(node->parent && node->parent->next) { | ||
419 | /* go up a level and across */ | ||
420 | node = node->parent->next; | ||
421 | } | ||
422 | else if(!node->parent && node->next) { | ||
423 | /* go to the next binomial tree */ | ||
424 | node = node->next; | ||
425 | } | ||
426 | else { | ||
427 | /* the end! */ | ||
428 | node = NULL; | ||
429 | } | ||
430 | } | ||
431 | #endif | ||
432 | |||
372 | __merge_ready(rt, tasks); | 433 | __merge_ready(rt, tasks); |
434 | |||
373 | check_for_preemptions(); | 435 | check_for_preemptions(); |
374 | 436 | ||
375 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 437 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
@@ -387,11 +449,12 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
387 | #endif | 449 | #endif |
388 | 450 | ||
389 | TRACE_TASK(t, "job_completion().\n"); | 451 | TRACE_TASK(t, "job_completion().\n"); |
390 | 452 | ||
391 | /* set flags */ | 453 | /* set flags */ |
392 | set_rt_flags(t, RT_F_SLEEP); | 454 | set_rt_flags(t, RT_F_SLEEP); |
393 | /* prepare for next period */ | 455 | /* prepare for next period */ |
394 | prepare_for_next_period(t); | 456 | prepare_for_next_period(t); |
457 | |||
395 | if (is_released(t, litmus_clock())) | 458 | if (is_released(t, litmus_clock())) |
396 | sched_trace_task_release(t); | 459 | sched_trace_task_release(t); |
397 | /* unlink */ | 460 | /* unlink */ |
@@ -902,8 +965,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
902 | else if (exists && !next) | 965 | else if (exists && !next) |
903 | TRACE("becomes idle at %llu.\n", litmus_clock()); | 966 | TRACE("becomes idle at %llu.\n", litmus_clock()); |
904 | #endif | 967 | #endif |
905 | 968 | ||
906 | |||
907 | return next; | 969 | return next; |
908 | } | 970 | } |
909 | 971 | ||
@@ -997,13 +1059,18 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
997 | set_rt_flags(task, RT_F_RUNNING); | 1059 | set_rt_flags(task, RT_F_RUNNING); |
998 | #endif | 1060 | #endif |
999 | 1061 | ||
1062 | if (tsk_rt(task)->has_aux_tasks) { | ||
1063 | |||
1064 | TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid); | ||
1065 | disable_aux_task_owner(task); | ||
1066 | } | ||
1067 | |||
1000 | gsnedf_job_arrival(task); | 1068 | gsnedf_job_arrival(task); |
1001 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1069 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1002 | } | 1070 | } |
1003 | 1071 | ||
1004 | static void gsnedf_task_block(struct task_struct *t) | 1072 | static void gsnedf_task_block(struct task_struct *t) |
1005 | { | 1073 | { |
1006 | // TODO: is this called on preemption?? | ||
1007 | unsigned long flags; | 1074 | unsigned long flags; |
1008 | 1075 | ||
1009 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | 1076 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); |
@@ -1013,6 +1080,12 @@ static void gsnedf_task_block(struct task_struct *t) | |||
1013 | 1080 | ||
1014 | unlink(t); | 1081 | unlink(t); |
1015 | 1082 | ||
1083 | if (tsk_rt(t)->has_aux_tasks) { | ||
1084 | |||
1085 | TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); | ||
1086 | enable_aux_task_owner(t); | ||
1087 | } | ||
1088 | |||
1016 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1089 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1017 | 1090 | ||
1018 | BUG_ON(!is_realtime(t)); | 1091 | BUG_ON(!is_realtime(t)); |
@@ -1027,8 +1100,22 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
1027 | gsnedf_change_prio_pai_tasklet(t, NULL); | 1100 | gsnedf_change_prio_pai_tasklet(t, NULL); |
1028 | #endif | 1101 | #endif |
1029 | 1102 | ||
1103 | #ifdef CONFIG_LITMUS_LOCKING | ||
1104 | if (tsk_rt(t)->is_aux_task) { | ||
1105 | exit_aux_task(t); /* cannot be called with gsnedf_lock held */ | ||
1106 | } | ||
1107 | #endif | ||
1108 | |||
1030 | /* unlink if necessary */ | 1109 | /* unlink if necessary */ |
1031 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1110 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1111 | |||
1112 | #ifdef CONFIG_LITMUS_LOCKING | ||
1113 | /* make sure we clean up on our way out */ | ||
1114 | if(tsk_rt(t)->has_aux_tasks) { | ||
1115 | disable_aux_task_owner(t); /* must be called with gsnedf_lock held */ | ||
1116 | } | ||
1117 | #endif | ||
1118 | |||
1032 | unlink(t); | 1119 | unlink(t); |
1033 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | 1120 | if (tsk_rt(t)->scheduled_on != NO_CPU) { |
1034 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | 1121 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; |
@@ -1037,7 +1124,7 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
1037 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1124 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1038 | 1125 | ||
1039 | BUG_ON(!is_realtime(t)); | 1126 | BUG_ON(!is_realtime(t)); |
1040 | TRACE_TASK(t, "RIP\n"); | 1127 | TRACE_TASK(t, "RIP\n"); |
1041 | } | 1128 | } |
1042 | 1129 | ||
1043 | 1130 | ||
@@ -1061,12 +1148,20 @@ static long gsnedf_admit_task(struct task_struct* tsk) | |||
1061 | #include <litmus/fdso.h> | 1148 | #include <litmus/fdso.h> |
1062 | 1149 | ||
1063 | /* called with IRQs off */ | 1150 | /* called with IRQs off */ |
1064 | static void __increase_priority_inheritance(struct task_struct* t, | 1151 | static int __increase_priority_inheritance(struct task_struct* t, |
1065 | struct task_struct* prio_inh) | 1152 | struct task_struct* prio_inh) |
1066 | { | 1153 | { |
1154 | int success = 1; | ||
1067 | int linked_on; | 1155 | int linked_on; |
1068 | int check_preempt = 0; | 1156 | int check_preempt = 0; |
1069 | 1157 | ||
1158 | if (prio_inh && prio_inh == effective_priority(t)) { | ||
1159 | /* relationship already established. */ | ||
1160 | TRACE_TASK(t, "already has effective priority of %s/%d\n", | ||
1161 | prio_inh->comm, prio_inh->pid); | ||
1162 | goto out; | ||
1163 | } | ||
1164 | |||
1070 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1165 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1071 | /* this sanity check allows for weaker locking in protocols */ | 1166 | /* this sanity check allows for weaker locking in protocols */ |
1072 | /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ | 1167 | /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ |
@@ -1126,28 +1221,40 @@ static void __increase_priority_inheritance(struct task_struct* t, | |||
1126 | &gsnedf.ready_queue); | 1221 | &gsnedf.ready_queue); |
1127 | check_for_preemptions(); | 1222 | check_for_preemptions(); |
1128 | } | 1223 | } |
1224 | |||
1225 | |||
1226 | /* propagate to aux tasks */ | ||
1227 | if (tsk_rt(t)->has_aux_tasks) { | ||
1228 | aux_task_owner_increase_priority(t); | ||
1229 | } | ||
1129 | } | 1230 | } |
1130 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1231 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1131 | } | 1232 | } |
1132 | else { | 1233 | else { |
1133 | TRACE_TASK(t, "Spurious invalid priority increase. " | 1234 | TRACE_TASK(t, "Spurious invalid priority increase. " |
1134 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | 1235 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" |
1135 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | 1236 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", |
1136 | t->comm, t->pid, | 1237 | t->comm, t->pid, |
1137 | effective_priority(t)->comm, effective_priority(t)->pid, | 1238 | effective_priority(t)->comm, effective_priority(t)->pid, |
1138 | (prio_inh) ? prio_inh->comm : "nil", | 1239 | (prio_inh) ? prio_inh->comm : "nil", |
1139 | (prio_inh) ? prio_inh->pid : -1); | 1240 | (prio_inh) ? prio_inh->pid : -1); |
1140 | WARN_ON(!prio_inh); | 1241 | WARN_ON(!prio_inh); |
1242 | success = 0; | ||
1141 | } | 1243 | } |
1142 | #endif | 1244 | #endif |
1245 | |||
1246 | out: | ||
1247 | return success; | ||
1143 | } | 1248 | } |
1144 | 1249 | ||
1145 | /* called with IRQs off */ | 1250 | /* called with IRQs off */ |
1146 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 1251 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
1147 | { | 1252 | { |
1253 | int success; | ||
1254 | |||
1148 | raw_spin_lock(&gsnedf_lock); | 1255 | raw_spin_lock(&gsnedf_lock); |
1149 | 1256 | ||
1150 | __increase_priority_inheritance(t, prio_inh); | 1257 | success = __increase_priority_inheritance(t, prio_inh); |
1151 | 1258 | ||
1152 | #ifdef CONFIG_LITMUS_SOFTIRQD | 1259 | #ifdef CONFIG_LITMUS_SOFTIRQD |
1153 | if(tsk_rt(t)->cur_klitirqd != NULL) | 1260 | if(tsk_rt(t)->cur_klitirqd != NULL) |
@@ -1160,7 +1267,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1160 | #endif | 1267 | #endif |
1161 | 1268 | ||
1162 | raw_spin_unlock(&gsnedf_lock); | 1269 | raw_spin_unlock(&gsnedf_lock); |
1163 | 1270 | ||
1164 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1271 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
1165 | if(tsk_rt(t)->held_gpus) { | 1272 | if(tsk_rt(t)->held_gpus) { |
1166 | int i; | 1273 | int i; |
@@ -1175,9 +1282,19 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1175 | 1282 | ||
1176 | 1283 | ||
1177 | /* called with IRQs off */ | 1284 | /* called with IRQs off */ |
1178 | static void __decrease_priority_inheritance(struct task_struct* t, | 1285 | static int __decrease_priority_inheritance(struct task_struct* t, |
1179 | struct task_struct* prio_inh) | 1286 | struct task_struct* prio_inh) |
1180 | { | 1287 | { |
1288 | int success = 1; | ||
1289 | |||
1290 | if (prio_inh == tsk_rt(t)->inh_task) { | ||
1291 | /* relationship already established. */ | ||
1292 | TRACE_TASK(t, "already inherits priority from %s/%d\n", | ||
1293 | (prio_inh) ? prio_inh->comm : "(nil)", | ||
1294 | (prio_inh) ? prio_inh->pid : 0); | ||
1295 | goto out; | ||
1296 | } | ||
1297 | |||
1181 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1298 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1182 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 1299 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
1183 | #endif | 1300 | #endif |
@@ -1214,6 +1331,11 @@ static void __decrease_priority_inheritance(struct task_struct* t, | |||
1214 | } | 1331 | } |
1215 | raw_spin_unlock(&gsnedf.release_lock); | 1332 | raw_spin_unlock(&gsnedf.release_lock); |
1216 | } | 1333 | } |
1334 | |||
1335 | /* propagate to aux tasks */ | ||
1336 | if (tsk_rt(t)->has_aux_tasks) { | ||
1337 | aux_task_owner_decrease_priority(t); | ||
1338 | } | ||
1217 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1339 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1218 | } | 1340 | } |
1219 | else { | 1341 | else { |
@@ -1224,16 +1346,23 @@ static void __decrease_priority_inheritance(struct task_struct* t, | |||
1224 | effective_priority(t)->comm, effective_priority(t)->pid, | 1346 | effective_priority(t)->comm, effective_priority(t)->pid, |
1225 | (prio_inh) ? prio_inh->comm : "nil", | 1347 | (prio_inh) ? prio_inh->comm : "nil", |
1226 | (prio_inh) ? prio_inh->pid : -1); | 1348 | (prio_inh) ? prio_inh->pid : -1); |
1349 | success = 0; | ||
1227 | } | 1350 | } |
1228 | #endif | 1351 | #endif |
1352 | |||
1353 | out: | ||
1354 | return success; | ||
1229 | } | 1355 | } |
1230 | 1356 | ||
1231 | static void decrease_priority_inheritance(struct task_struct* t, | 1357 | static void decrease_priority_inheritance(struct task_struct* t, |
1232 | struct task_struct* prio_inh) | 1358 | struct task_struct* prio_inh) |
1233 | { | 1359 | { |
1360 | int success; | ||
1361 | |||
1234 | raw_spin_lock(&gsnedf_lock); | 1362 | raw_spin_lock(&gsnedf_lock); |
1235 | __decrease_priority_inheritance(t, prio_inh); | 1363 | |
1236 | 1364 | success = __decrease_priority_inheritance(t, prio_inh); | |
1365 | |||
1237 | #ifdef CONFIG_LITMUS_SOFTIRQD | 1366 | #ifdef CONFIG_LITMUS_SOFTIRQD |
1238 | if(tsk_rt(t)->cur_klitirqd != NULL) | 1367 | if(tsk_rt(t)->cur_klitirqd != NULL) |
1239 | { | 1368 | { |
@@ -1245,7 +1374,7 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1245 | #endif | 1374 | #endif |
1246 | 1375 | ||
1247 | raw_spin_unlock(&gsnedf_lock); | 1376 | raw_spin_unlock(&gsnedf_lock); |
1248 | 1377 | ||
1249 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1378 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
1250 | if(tsk_rt(t)->held_gpus) { | 1379 | if(tsk_rt(t)->held_gpus) { |
1251 | int i; | 1380 | int i; |
@@ -1828,6 +1957,8 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1828 | .allocate_lock = gsnedf_allocate_lock, | 1957 | .allocate_lock = gsnedf_allocate_lock, |
1829 | .increase_prio = increase_priority_inheritance, | 1958 | .increase_prio = increase_priority_inheritance, |
1830 | .decrease_prio = decrease_priority_inheritance, | 1959 | .decrease_prio = decrease_priority_inheritance, |
1960 | .__increase_prio = __increase_priority_inheritance, | ||
1961 | .__decrease_prio = __decrease_priority_inheritance, | ||
1831 | #endif | 1962 | #endif |
1832 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1963 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1833 | .nested_increase_prio = nested_increase_priority_inheritance, | 1964 | .nested_increase_prio = nested_increase_priority_inheritance, |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 245e41c25a5d..d24c9167cff8 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -137,6 +137,18 @@ static void litmus_dummy_increase_prio(struct task_struct* t, struct task_struct | |||
137 | static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh) | 137 | static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh) |
138 | { | 138 | { |
139 | } | 139 | } |
140 | |||
141 | static int litmus_dummy___increase_prio(struct task_struct* t, struct task_struct* prio_inh) | ||
142 | { | ||
143 | TRACE_CUR("WARNING: Dummy litmus_dummy___increase_prio called!\n"); | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struct* prio_inh) | ||
148 | { | ||
149 | TRACE_CUR("WARNING: Dummy litmus_dummy___decrease_prio called!\n"); | ||
150 | return 0; | ||
151 | } | ||
140 | #endif | 152 | #endif |
141 | 153 | ||
142 | #ifdef CONFIG_LITMUS_SOFTIRQD | 154 | #ifdef CONFIG_LITMUS_SOFTIRQD |
@@ -227,6 +239,8 @@ struct sched_plugin linux_sched_plugin = { | |||
227 | .allocate_lock = litmus_dummy_allocate_lock, | 239 | .allocate_lock = litmus_dummy_allocate_lock, |
228 | .increase_prio = litmus_dummy_increase_prio, | 240 | .increase_prio = litmus_dummy_increase_prio, |
229 | .decrease_prio = litmus_dummy_decrease_prio, | 241 | .decrease_prio = litmus_dummy_decrease_prio, |
242 | .__increase_prio = litmus_dummy___increase_prio, | ||
243 | .__decrease_prio = litmus_dummy___decrease_prio, | ||
230 | #endif | 244 | #endif |
231 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 245 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
232 | .nested_increase_prio = litmus_dummy_nested_increase_prio, | 246 | .nested_increase_prio = litmus_dummy_nested_increase_prio, |
@@ -289,6 +303,8 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
289 | CHECK(allocate_lock); | 303 | CHECK(allocate_lock); |
290 | CHECK(increase_prio); | 304 | CHECK(increase_prio); |
291 | CHECK(decrease_prio); | 305 | CHECK(decrease_prio); |
306 | CHECK(__increase_prio); | ||
307 | CHECK(__decrease_prio); | ||
292 | #endif | 308 | #endif |
293 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 309 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
294 | CHECK(nested_increase_prio); | 310 | CHECK(nested_increase_prio); |