author     Glenn Elliott <gelliott@cs.unc.edu>  2013-01-21 20:00:29 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2013-01-21 20:00:29 -0500
commit     95717fa9f4b9f725928e898c42fb0e711e896311 (patch)
tree       6c552bd879cdafcd90b6bdacdc167338dd3189bf
parent     84aa3706d63edda13560ff812740cac0adf744e1 (diff)
Fixed a case where blocked tasks are released.

Fixed a bug where AUX tasks were added to the ready queue while those
AUX tasks were actually blocked. The bug stems from the fact that AUX
tasks do not make themselves real-time; another thread does this on
their behalf, so the newly promoted task is not necessarily runnable.
Also fixed minor bugs elsewhere.

NOTE: This only fixes C-EDF; the other plugins remain to be fixed.
-rw-r--r-- | include/litmus/litmus.h |  13
-rw-r--r-- | litmus/aux_tasks.c      |  23
-rw-r--r-- | litmus/edf_common.c     | 114
-rw-r--r-- | litmus/ikglp_lock.c     |   3
-rw-r--r-- | litmus/jobs.c           |  10
-rw-r--r-- | litmus/nvidia_info.c    |   4
-rw-r--r-- | litmus/sched_cedf.c     |  62
-rw-r--r-- | litmus/sched_trace.c    |   1

8 files changed, 124 insertions(+), 106 deletions(-)
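In short, the C-EDF portion of the fix (litmus/sched_cedf.c, below) stops cedf_task_new() from unconditionally enqueueing a newly admitted task: because another thread, not the AUX task itself, performs the real-time admission, the task may still be blocked at that point. Condensed from the hunks below (kernel context assumed, not a standalone example):

	/* in cedf_task_new(): only place the task on the ready queue if it is
	 * actually runnable; a blocked task arrives later, via task_wake_up(). */
	if (is_running(t)) {
		cedf_job_arrival(t);
	} else {
		TRACE("Deferred job arrival because %d is blocked.\n", t->pid);
		unlink(t);
	}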
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 2da61fa58bdc..17d30326034c 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -85,6 +85,19 @@ static inline lt_t litmus_clock(void)
 	return ktime_to_ns(ktime_get());
 }
 
+static inline int is_persistent(struct task_struct* t)
+{
+	int is_per = ( 0
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		|| t->rt_param.is_aux_task
+#endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+		|| t->rt_param.is_interrupt_thread
+#endif
+		);
+	return is_per;
+}
+
 /* A macro to convert from nanoseconds to ktime_t. */
 #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
 
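The new is_persistent() helper identifies daemon-like threads: AUX tasks and, when CONFIG_LITMUS_SOFTIRQD is enabled, klmirqd interrupt threads. It is used later in this commit by setup_release() in litmus/jobs.c and by cedf_task_wake_up() in litmus/sched_cedf.c. A standalone mock of what it computes, with both config options assumed enabled (illustration only; fake types, not kernel code):

#include <stdio.h>

/* mock of is_persistent(): an OR over the daemon-thread flags */
struct mock_rt_param { int is_aux_task; int is_interrupt_thread; };

static int mock_is_persistent(const struct mock_rt_param *p)
{
	return p->is_aux_task || p->is_interrupt_thread;
}

int main(void)
{
	struct mock_rt_param aux = { .is_aux_task = 1 };
	struct mock_rt_param plain = { 0 };
	printf("aux task: %d, ordinary task: %d\n",
	       mock_is_persistent(&aux), mock_is_persistent(&plain));
	return 0;
}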
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index aa851ab2655b..46217e78b1ec 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -67,11 +67,11 @@ int exit_aux_task(struct task_struct *t)
 	return retval;
 }
 
+#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
 static int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp)
 {
 	int retval = 0;
 
-#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
 	struct list_head *pos;
 
 	TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);
@@ -94,7 +94,6 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s
 			retval = litmus->__increase_prio(aux, hp);
 		}
 	}
-#endif
 
 	return retval;
 }
@@ -103,7 +102,6 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s
 {
 	int retval = 0;
 
-#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
 	struct list_head *pos;
 
 	TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);
@@ -121,11 +119,12 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s
 			retval = litmus->__decrease_prio(aux, hp);
 		}
 	}
-#endif
 
 	return retval;
 }
 
+#endif
+
 int aux_task_owner_increase_priority(struct task_struct *t)
 {
 	int retval = 0;
@@ -167,9 +166,9 @@ int aux_task_owner_increase_priority(struct task_struct *t)
 		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
 		retval = aux_tasks_increase_priority(leader, hp_eff);
 	}
-#endif
-
 out:
+
+#endif
 	return retval;
 }
 
@@ -215,9 +214,9 @@ int aux_task_owner_decrease_priority(struct task_struct *t)
 			retval = aux_tasks_decrease_priority(leader, hp_eff);
 		}
 	}
-#endif
-
 out:
+
+#endif
 	return retval;
 }
 
@@ -305,9 +304,9 @@ long enable_aux_task_owner(struct task_struct *t)
 		retval = aux_tasks_increase_priority(leader,
 			(tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp);
 	}
-#endif
-
 out:
+
+#endif
 	return retval;
 }
 
@@ -356,9 +355,9 @@ long disable_aux_task_owner(struct task_struct *t)
 
 		retval = aux_tasks_decrease_priority(leader, to_inh);
 	}
-#endif
-
 out:
+
+#endif
 	return retval;
 }
 
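The pattern repeated across the litmus/aux_tasks.c hunks above, moving the #endif so that each out: label sits inside the CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE region, is likely one of the "minor bugs" mentioned in the commit message: when the option is disabled, every goto out is compiled away and a label left outside the guard would be flagged as defined but unused. A standalone sketch of the resulting arrangement (generic feature macro, not the kernel code itself):

/* with the feature disabled, the goto and the label disappear together,
 * so no unused-label warning remains */
#define FEATURE_ENABLED 0

static int guarded_op(int err)
{
	int retval = 0;
#if FEATURE_ENABLED
	if (err) {
		retval = -1;
		goto out;
	}
out:
#endif
	return retval;
}

int main(void)
{
	return guarded_op(0);
}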
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 441fbfddf0c2..ef22eb93dbf3 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -67,11 +67,14 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 		return 0;
 	}
 
-
 	/* check for NULL tasks */
 	if (!first || !second) {
 		return first && !second;
 	}
+	/* check for non-realtime */
+	if (!is_realtime(first) || !is_realtime(second)) {
+		return is_realtime(first) && !is_realtime(second);
+	}
 
 	/* There is some goofy stuff in this code here. There are three subclasses
 	 * within the SCHED_LITMUS scheduling class:
@@ -92,49 +95,31 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_BOOSTED)
 	/* run aux tasks at max priority */
-	/* TODO: Actually use prio-boosting. */
-	if (first->rt_param.is_aux_task != second->rt_param.is_aux_task)
-	{
-		return (first->rt_param.is_aux_task > second->rt_param.is_aux_task);
-	}
-	else if(first->rt_param.is_aux_task && second->rt_param.is_aux_task)
-	{
-		if(first->group_leader == second->group_leader) {
-			TRACE_CUR("aux tie break!\n"); // tie-break by BASE priority of the aux tasks
-			goto aux_tie_break;
-		}
-		first = first->group_leader;
-		second = second->group_leader;
+	if (tsk_rt(first)->is_aux_task != tsk_rt(second)->is_aux_task) {
+		return (tsk_rt(first)->is_aux_task > tsk_rt(second)->is_aux_task);
 	}
 #elif defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE)
 	{
-		int first_lo_aux = first->rt_param.is_aux_task && !first->rt_param.inh_task;
-		int second_lo_aux = second->rt_param.is_aux_task && !second->rt_param.inh_task;
+		int first_lo_aux = tsk_rt(first)->is_aux_task && !tsk_rt(first)->inh_task;
+		int second_lo_aux = tsk_rt(second)->is_aux_task && !tsk_rt(second)->inh_task;
 
 		/* prioritize aux tasks without inheritance below real-time tasks */
 		if (first_lo_aux || second_lo_aux) {
 			// one of these is an aux task without inheritance.
-			if(first_lo_aux && second_lo_aux) {
-				TRACE_CUR("aux tie break!\n"); // tie-break by BASE priority of the aux tasks
-				goto aux_tie_break;
-			}
-			else {
-
-				// make the aux thread lowest priority real-time task
-				int temp = 0;
-				if (first_lo_aux && is_realtime(second)) {
-					// temp = 0;
-				}
-				else if(second_lo_aux && is_realtime(first)) {
-					temp = 1;
-				}
+			if (first_lo_aux != second_lo_aux) {
+				int temp = (first_lo_aux < second_lo_aux); // non-lo-aux has higher priority.
 				TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp);
 				return temp;
 			}
+			else {
+				/* both MUST be lo_aux. tie-break. */
+				TRACE_CUR("aux tie break!\n");
+				goto aux_tie_break;
+			}
 		}
 
-		if (first->rt_param.is_aux_task && second->rt_param.is_aux_task &&
-			first->rt_param.inh_task == second->rt_param.inh_task) {
+		if (tsk_rt(first)->is_aux_task && tsk_rt(second)->is_aux_task &&
+			tsk_rt(first)->inh_task == tsk_rt(second)->inh_task) {
 			// inh_task is !NULL for both tasks since neither was a lo_aux task.
 			// Both aux tasks inherit from the same task, so tie-break
 			// by base priority of the aux tasks.
@@ -146,33 +131,26 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	{
-		int first_lo_klmirqd = first->rt_param.is_interrupt_thread && !first->rt_param.inh_task;
-		int second_lo_klmirqd = second->rt_param.is_interrupt_thread && !second->rt_param.inh_task;
+		int first_lo_klmirqd = tsk_rt(first)->is_interrupt_thread && !tsk_rt(first)->inh_task;
+		int second_lo_klmirqd = tsk_rt(second)->is_interrupt_thread && !tsk_rt(second)->inh_task;
 
 		/* prioritize aux tasks without inheritance below real-time tasks */
 		if (first_lo_klmirqd || second_lo_klmirqd) {
 			// one of these is an klmirqd thread without inheritance.
-			if(first_lo_klmirqd && second_lo_klmirqd) {
-				TRACE_CUR("klmirqd tie break!\n"); // tie-break by BASE priority of the aux tasks
-				goto klmirqd_tie_break;
-			}
-			else {
-				// make the klmirqd thread the lowest-priority real-time task
-				// but (above low-prio aux tasks and Linux tasks)
-				int temp = 0;
-				if (first_lo_klmirqd && is_realtime(second)) {
-					// temp = 0;
-				}
-				else if(second_lo_klmirqd && is_realtime(first)) {
-					temp = 1;
-				}
+			if (first_lo_klmirqd != second_lo_klmirqd) {
+				int temp = (first_lo_klmirqd < second_lo_klmirqd); // non-klmirqd has higher priority
 				TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp);
 				return temp;
 			}
+			else {
+				/* both MUST be klmirqd. tie-break. */
+				TRACE_CUR("klmirqd tie break!\n");
+				goto klmirqd_tie_break;
+			}
 		}
 
-		if (first->rt_param.is_interrupt_thread && second->rt_param.is_interrupt_thread &&
-			first->rt_param.inh_task == second->rt_param.inh_task) {
+		if (tsk_rt(first)->is_interrupt_thread && tsk_rt(second)->is_interrupt_thread &&
+			tsk_rt(first)->inh_task == tsk_rt(second)->inh_task) {
 			// inh_task is !NULL for both tasks since neither was a lo_klmirqd task.
 			// Both klmirqd tasks inherit from the same task, so tie-break
 			// by base priority of the klmirqd tasks.
@@ -187,19 +165,19 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	/* Check for EFFECTIVE priorities. Change task
 	 * used for comparison in such a case.
 	 */
-	if (unlikely(first->rt_param.inh_task)
+	if (unlikely(tsk_rt(first)->inh_task)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 		&& (first_mode == EFFECTIVE)
 #endif
 		) {
-		first_task = first->rt_param.inh_task;
+		first_task = tsk_rt(first)->inh_task;
 	}
-	if (unlikely(second->rt_param.inh_task)
+	if (unlikely(tsk_rt(second)->inh_task)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 		&& (second_mode == EFFECTIVE)
 #endif
 		) {
-		second_task = second->rt_param.inh_task;
+		second_task = tsk_rt(second)->inh_task;
 	}
 
 	/* Check for priority boosting. Tie-break by start of boosting.
@@ -222,17 +200,14 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 #endif
 
-#ifdef CONFIG_REALTIME_AUX_TASKS
+#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
 aux_tie_break:
 #endif
 #ifdef CONFIG_LITMUS_SOFTIRQD
 klmirqd_tie_break:
 #endif
 
-	if (!is_realtime(second_task)) {
-		return 1;
-	}
-	else if (earlier_deadline(first_task, second_task)) {
+	if (earlier_deadline(first_task, second_task)) {
 		return 1;
 	}
 	else if (get_deadline(first_task) == get_deadline(second_task)) {
@@ -297,10 +272,10 @@ klmirqd_tie_break:
 	}
 	else if (first_task->pid == second_task->pid) {
 #ifdef CONFIG_LITMUS_SOFTIRQD
-		if (first_task->rt_param.is_interrupt_thread < second_task->rt_param.is_interrupt_thread) {
+		if (tsk_rt(first_task)->is_interrupt_thread < tsk_rt(second_task)->is_interrupt_thread) {
 			return 1;
 		}
-		else if (first_task->rt_param.is_interrupt_thread == second_task->rt_param.is_interrupt_thread) {
+		else if (tsk_rt(first_task)->is_interrupt_thread == tsk_rt(second_task)->is_interrupt_thread) {
 #endif
 
 #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE)
@@ -311,7 +286,7 @@ klmirqd_tie_break:
 #endif
 
 		/* Something could be wrong if you get this far. */
-		if (unlikely(first->rt_param.inh_task == second->rt_param.inh_task)) {
+		if (unlikely(tsk_rt(first)->inh_task == tsk_rt(second)->inh_task)) {
 			/* Both tasks have the same inherited priority.
 			 * Likely in a bug-condition.
 			 */
@@ -324,22 +299,22 @@ klmirqd_tie_break:
 		}
 		else {
 			/* At least one task must inherit */
-			BUG_ON(!first->rt_param.inh_task &&
-				!second->rt_param.inh_task);
+			BUG_ON(!tsk_rt(first)->inh_task &&
+				!tsk_rt(second)->inh_task);
 
 			/* The task withOUT the inherited priority wins. */
-			if (second->rt_param.inh_task) {
+			if (tsk_rt(second)->inh_task) {
 				/*
 				 * common with aux tasks.
 				TRACE_CUR("unusual comparison: "
 					"first = %s/%d first_task = %s/%d "
 					"second = %s/%d second_task = %s/%d\n",
 					first->comm, first->pid,
-					(first->rt_param.inh_task) ? first->rt_param.inh_task->comm : "(nil)",
-					(first->rt_param.inh_task) ? first->rt_param.inh_task->pid : 0,
+					(tsk_rt(first)->inh_task) ? tsk_rt(first)->inh_task->comm : "(nil)",
+					(tsk_rt(first)->inh_task) ? tsk_rt(first)->inh_task->pid : 0,
 					second->comm, second->pid,
-					(second->rt_param.inh_task) ? second->rt_param.inh_task->comm : "(nil)",
-					(second->rt_param.inh_task) ? second->rt_param.inh_task->pid : 0);
+					(tsk_rt(second)->inh_task) ? tsk_rt(second)->inh_task->comm : "(nil)",
+					(tsk_rt(second)->inh_task) ? tsk_rt(second)->inh_task->pid : 0);
 				*/
 				return 1;
 			}
@@ -426,3 +401,4 @@ int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 	/* make sure to get non-rt stuff out of the way */
 	return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t);
 }
+
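The rewritten aux and klmirqd branches above both use the same comparison idiom: when exactly one of the two tasks is a low-priority aux/klmirqd thread, (first_lo_aux < second_lo_aux) is 1 precisely when first is the ordinary real-time task, so the non-aux task wins without the old if/else ladder. A standalone illustration of just that idiom (plain C, no kernel types):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* flag values as computed in edf_higher_prio(): 1 means "low-priority
	 * aux/klmirqd thread without inheritance", 0 means ordinary RT task */
	int rt = 0, lo_aux = 1;

	assert((rt < lo_aux) == 1);   /* first is the RT task    -> first wins  */
	assert((lo_aux < rt) == 0);   /* first is the lo-aux one -> first loses */

	printf("the non-aux task wins in either argument order\n");
	return 0;
}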
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index aa6b659e437d..170c43fcc78e 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -870,7 +870,8 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 	}
 	else {
 		TRACE_TASK(t, "No change in effective priority (it is %d/%s). BUG?\n",
-			new_max_eff_prio->comm, new_max_eff_prio->pid);
+			(new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
+			(new_max_eff_prio) ? new_max_eff_prio->pid : -1);
 		raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock);
 		unlock_fine_irqrestore(&sem->lock, flags);
 	}
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 596b48e699b1..bdfc41004d38 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -8,6 +8,14 @@
 
 static inline void setup_release(struct task_struct *t, lt_t release)
 {
+	/* Shift all tasks that are actually daemons that inherit
+	 * priority to be released immediatly. They need to be ready to run
+	 * at _all_ times.
+	 */
+	if (unlikely(is_persistent(t))) {
+		release = litmus_clock();
+	}
+
 	/* prepare next release */
 	t->rt_param.job_params.release = release;
 	t->rt_param.job_params.deadline = release + get_rt_relative_deadline(t);
@@ -40,7 +48,7 @@ void release_at(struct task_struct *t, lt_t start)
 {
 	BUG_ON(!t);
 	setup_release(t, start);
-	tsk_rt(t)->completed = 0;
+	t->rt_param.completed = 0;
 }
 
 
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index 7160bb85ac9d..dda863009fee 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -729,6 +729,7 @@ int gpu_owner_increase_priority(struct task_struct *t)
 		binheap_decrease(&tsk_rt(t)->gpu_owner_node, &reg->owners);
 	}
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
 	hp = container_of(binheap_top_entry(&reg->owners, struct rt_param, gpu_owner_node),
 		struct task_struct, rt_param);
 
@@ -738,6 +739,7 @@ int gpu_owner_increase_priority(struct task_struct *t)
 
 		retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff);
 	}
+#endif
 
 out:
 	return retval;
@@ -774,6 +776,7 @@ int gpu_owner_decrease_priority(struct task_struct *t)
 	binheap_add(&tsk_rt(t)->gpu_owner_node, &reg->owners,
 		struct rt_param, gpu_owner_node);
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
 	if (hp == t) { /* t was originally the hp */
 		struct task_struct *new_hp =
 			container_of(binheap_top_entry(&reg->owners, struct rt_param, gpu_owner_node),
@@ -784,6 +787,7 @@ int gpu_owner_decrease_priority(struct task_struct *t)
 			retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff);
 		}
 	}
+#endif
 
 out:
 	return retval;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 2ec919dc850c..1feb2fbe42bc 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -211,6 +211,7 @@ static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster)
 	return binheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn);
 }
 
+static noinline void unlink(struct task_struct* t);
 
 /* link_task_to_cpu - Update the link of a CPU.
  * Handles the case where the to-be-linked task is already
@@ -237,8 +238,28 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		on_cpu = linked->rt_param.scheduled_on;
 		if (on_cpu != NO_CPU) {
 			sched = &per_cpu(cedf_cpu_entries, on_cpu);
-			/* this should only happen if not linked already */
+
 			BUG_ON(sched->linked == linked);
+#if 0
+			/* this should only happen if not linked already */
+			if (sched->linked == linked) {
+				printk(KERN_EMERG "%s/%d hit sched bug. is_aux = %d; has_aux = %d\n",
+					linked->comm, linked->pid, tsk_rt(linked)->is_aux_task, tsk_rt(linked)->has_aux_tasks);
+				printk(KERN_EMERG "eCPU: %d, eLinked: %s/%d, sCPU: %d, sched: %s/%d\n",
+					entry->cpu,
+					(entry->linked) ?
+						entry->linked->comm : "nil",
+					(entry->linked) ?
+						entry->linked->pid : -1,
+					sched->cpu,
+					(sched->linked) ?
+						sched->linked->comm : "nil",
+					(sched->linked) ?
+						sched->linked->pid : -1);
+				TRACE_TASK(linked, "LINK BUG!\n");
+				unlink(linked);
+			}
+#endif
 
 			/* If we are already scheduled on the CPU to which we
 			 * wanted to link, we don't need to do the swap --
@@ -957,7 +978,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	cpu_entry_t* entry;
 	cedf_domain_t* cluster;
 
-	TRACE("c-edf: task new %d\n", t->pid);
+	TRACE("c-edf: task new %d (param running = %d, is_running = %d)\n", t->pid, running, is_running(t));
 
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
@@ -986,44 +1007,41 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	} else {
 		t->rt_param.scheduled_on = NO_CPU;
 	}
 	t->rt_param.linked_on = NO_CPU;
 
-	cedf_job_arrival(t);
+	if (is_running(t)) {
+		cedf_job_arrival(t);
+	}
+	else {
+		TRACE("Deferred job arrival because %d is blocked.\n", t->pid);
+		unlink(t); /* needed?? */
+	}
 	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
-	//lt_t now;
 	cedf_domain_t *cluster;
+	lt_t now = litmus_clock();
 
-	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+	TRACE_TASK(task, "wake_up at %llu\n", now);
 
 	cluster = task_cpu_cluster(task);
 
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
-#if 0
-	/* sporadic task model. will increment job numbers automatically */
-	now = litmus_clock();
-	if (is_tardy(task, now)) {
-		/* new sporadic release */
+	if (unlikely(is_persistent(task) && is_tardy(task, now))) {
+		/* treat tardy perisistent tasks as if they were sporadic
+		   tasks by releasing a new job if they're tardy. */
 		release_at(task, now);
 		sched_trace_task_release(task);
 	}
-	else {
-		if (task->rt.time_slice) {
-			/* came back in time before deadline
-			 */
-			tsk_rt(task)->completed = 0;
-		}
-	}
-#else
-	/* periodic task model. don't force job to end.
-	 * rely on user to say when jobs complete or when budget expires. */
-	tsk_rt(task)->completed = 0;
-#endif
+	else if (task->rt.time_slice) {
+		/* periodic task model. don't force job to end.
+		 * rely on user to say when jobs complete or when budget expires. */
+		tsk_rt(task)->completed = 0;
+	}
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
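The other half of the C-EDF change sits in cedf_task_wake_up() above: the previously #if 0'd sporadic-release path returns, but only for persistent (aux/klmirqd) threads that wake up tardy; everything else keeps the periodic behaviour of merely clearing the completed flag. Condensed from the hunk above (kernel context assumed, not a standalone example):

	/* in cedf_task_wake_up(): */
	if (unlikely(is_persistent(task) && is_tardy(task, now))) {
		/* tardy persistent helper thread: give it a fresh, sporadic-style release */
		release_at(task, now);
		sched_trace_task_release(task);
	} else if (task->rt.time_slice) {
		/* periodic model: keep the current job, just mark it not completed */
		tsk_rt(task)->completed = 0;
	}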
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
index f4171fddbbb1..d52e2a75dd96 100644
--- a/litmus/sched_trace.c
+++ b/litmus/sched_trace.c
@@ -75,7 +75,6 @@ void sched_trace_log_message(const char* fmt, ...)
 	va_end(args);
 }
 
-
 /*
  * log_read - Read the trace buffer
  *