author		Glenn Elliott <gelliott@cs.unc.edu>	2013-01-23 18:13:11 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-01-23 18:13:11 -0500
commit		821a500d34ae6b2487a2ebecc6ab276fdfde0680 (patch)
tree		0033c2a08bdf02116e97788627e0d2857ad05259
parent		95717fa9f4b9f725928e898c42fb0e711e896311 (diff)
Prevent AUX tasks from being dropped by Litmus.
-rw-r--r--	include/litmus/rt_domain.h	| 12
-rw-r--r--	include/litmus/sched_plugin.h	|  1
-rw-r--r--	kernel/sched.c			|  7
-rw-r--r--	litmus/aux_tasks.c		|  1
-rw-r--r--	litmus/edf_common.c		| 42
-rw-r--r--	litmus/ikglp_lock.c		|  2
-rw-r--r--	litmus/sched_cedf.c		| 54
-rw-r--r--	litmus/sched_plugin.c		|  7
8 files changed, 53 insertions(+), 73 deletions(-)
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index ac249292e866..961d7e7c4912 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -85,8 +85,18 @@ void __add_release(rt_domain_t* rt, struct task_struct *task);
 static inline struct task_struct* __take_ready(rt_domain_t* rt)
 {
 	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
-	if (hn)
+	if (hn) {
+		struct task_struct* taken = bheap2task(hn);
+		TRACE("rt: taking %s/%d [inh_task: %s/%d]\n",
+			(taken) ? taken->comm : "nil",
+			(taken) ? taken->pid : -1,
+			(taken && tsk_rt(taken)->inh_task) ?
+				tsk_rt(taken)->inh_task->comm : "nil",
+			(taken && tsk_rt(taken)->inh_task) ?
+				tsk_rt(taken)->inh_task->pid : -1);
+
 		return bheap2task(hn);
+	}
 	else
 		return NULL;
 }
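
Note: the TRACE added above uses a null-guarded "%s/%d" formatting idiom that recurs throughout this patch. Below is a minimal user-space sketch of that same idiom, with a hypothetical struct standing in for struct task_struct and tsk_rt() (the LITMUS headers are not reproduced here), so the guard logic can be tried in isolation:

#include <stdio.h>

/* hypothetical stand-in for struct task_struct + tsk_rt()->inh_task */
struct task {
	char comm[16];           /* task name */
	int pid;                 /* task id */
	struct task *inh_task;   /* priority donor; may be NULL */
};

/* mirrors the patch's null-guarded "%s/%d" trace formatting */
static void trace_take(const struct task *taken)
{
	printf("rt: taking %s/%d [inh_task: %s/%d]\n",
	       taken ? taken->comm : "nil",
	       taken ? taken->pid : -1,
	       (taken && taken->inh_task) ? taken->inh_task->comm : "nil",
	       (taken && taken->inh_task) ? taken->inh_task->pid : -1);
}

int main(void)
{
	struct task donor = { "rtspin", 1402, NULL };
	struct task aux = { "aux_thread", 1403, &donor };
	trace_take(&aux);  /* rt: taking aux_thread/1403 [inh_task: rtspin/1402] */
	trace_take(NULL);  /* rt: taking nil/-1 [inh_task: nil/-1] */
	return 0;
}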
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index d0e7d74bb45e..78cec30866ac 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -108,7 +108,6 @@ typedef int (*__higher_prio_t)(struct task_struct* a, comparison_mode_t a_mod,
 typedef int (*default_cpu_for_gpu_t)(int gpu);
 #endif
 
-
 /********************* sys call backends ********************/
 /* This function causes the caller to sleep until the next release */
 typedef long (*complete_job_t) (void);
diff --git a/kernel/sched.c b/kernel/sched.c
index e29a97235f26..51fb72b5af79 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2708,14 +2708,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	int cpu, success = 0;
 
 	if (is_realtime(p)) {
-		//WARN_ON(1);
 		TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state);
 	}
 
 	smp_wmb();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	if (!(p->state & state))
+	if (!(p->state & state)) {
+		if (is_realtime(p)) {
+			TRACE_TASK(p, "try_to_wake_up() masked out, mask:%x\n", state);
+		}
 		goto out;
+	}
 
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
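
For context on the hunk above: try_to_wake_up() only wakes a sleeper whose current state is covered by the caller's state bitmask, and the new TRACE_TASK records the case where a real-time task's wake-up is masked out and silently dropped. A standalone sketch of the mask test follows (the constant values here are illustrative, not the kernel's definitions):

#include <stdio.h>

#define TASK_INTERRUPTIBLE	0x01	/* illustrative values only */
#define TASK_UNINTERRUPTIBLE	0x02

/* the wake-up proceeds only if the sleeper's state is in the mask */
static int may_wake(unsigned int task_state, unsigned int wake_mask)
{
	return (task_state & wake_mask) != 0;
}

int main(void)
{
	/* masked out: uninterruptible sleeper vs. interruptible-only wake */
	printf("%d\n", may_wake(TASK_UNINTERRUPTIBLE, TASK_INTERRUPTIBLE)); /* 0 */
	/* allowed: states match */
	printf("%d\n", may_wake(TASK_INTERRUPTIBLE, TASK_INTERRUPTIBLE));   /* 1 */
	return 0;
}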
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index 46217e78b1ec..07907e22bc09 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -122,7 +122,6 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s
 
 	return retval;
 }
-
 #endif
 
 int aux_task_owner_increase_priority(struct task_struct *t)
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index ef22eb93dbf3..32ee5f464ef8 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -259,8 +259,6 @@ klmirqd_tie_break:
 	}
 	pid_break = (fhash == shash);
 #else
-
-
 	/* CONFIG_EDF_PID_TIE_BREAK */
 	pid_break = 1; // fall through to tie-break by pid;
 #endif
@@ -271,31 +269,27 @@ klmirqd_tie_break:
 		return 1;
 	}
 	else if (first_task->pid == second_task->pid) {
+		/* there is inheritance going on. consider inheritors. */
 #ifdef CONFIG_LITMUS_SOFTIRQD
-		if (tsk_rt(first_task)->is_interrupt_thread < tsk_rt(second_task)->is_interrupt_thread) {
+		/* non-interrupt thread gets prio */
+		if (!tsk_rt(first)->is_interrupt_thread && tsk_rt(second)->is_interrupt_thread) {
 			return 1;
 		}
-		else if (tsk_rt(first_task)->is_interrupt_thread == tsk_rt(second_task)->is_interrupt_thread) {
+		else if (tsk_rt(first)->is_interrupt_thread == tsk_rt(second)->is_interrupt_thread) {
 #endif
 
 #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE)
-		if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) {
+		/* non-aux thread gets prio */
+		if (!tsk_rt(first)->is_aux_task && tsk_rt(second)->is_aux_task) {
 			return 1;
 		}
-		else if (tsk_rt(first)->is_aux_task == tsk_rt(second)->is_aux_task) {
+		else if (tsk_rt(first_task)->is_aux_task == tsk_rt(second_task)->is_aux_task) {
 #endif
-
-		/* Something could be wrong if you get this far. */
-		if (unlikely(tsk_rt(first)->inh_task == tsk_rt(second)->inh_task)) {
-			/* Both tasks have the same inherited priority.
-			 * Likely in a bug-condition.
-			 */
-			if (first->pid < second->pid) {
-				return 1;
-			}
-			else if (first->pid == second->pid) {
-				//WARN_ON(1);
-			}
+		/* if both tasks inherit from the same task */
+		if (tsk_rt(first)->inh_task == tsk_rt(second)->inh_task) {
+			/* TODO: Make a recursive call to edf_higher_prio,
+			   comparing base priorities. */
+			return (first->pid < second->pid);
 		}
 		else {
 			/* At least one task must inherit */
@@ -304,18 +298,6 @@ klmirqd_tie_break:
 
 			/* The task withOUT the inherited priority wins. */
 			if (tsk_rt(second)->inh_task) {
-				/*
-				 * common with aux tasks.
-				TRACE_CUR("unusual comparison: "
-					"first = %s/%d first_task = %s/%d "
-					"second = %s/%d second_task = %s/%d\n",
-					first->comm, first->pid,
-					(tsk_rt(first)->inh_task) ? tsk_rt(first)->inh_task->comm : "(nil)",
-					(tsk_rt(first)->inh_task) ? tsk_rt(first)->inh_task->pid : 0,
-					second->comm, second->pid,
-					(tsk_rt(second)->inh_task) ? tsk_rt(second)->inh_task->comm : "(nil)",
-					(tsk_rt(second)->inh_task) ? tsk_rt(second)->inh_task->pid : 0);
-				*/
 				return 1;
 			}
 		}
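
Taken together, the edf_common.c hunks turn the PID-level tie-break into an explicit ladder: a non-interrupt thread beats a klmirqd interrupt thread, a non-aux task beats an aux task, tasks with the same (or no) priority donor fall back to raw PID order, and otherwise the task without an inherited priority wins. A condensed user-space sketch of that ladder under those assumptions (stubbed types; field names mirror the patch, but this is not the plugin's real interface):

/* condensed stand-in for the LITMUS per-task state this patch compares */
struct rt_task {
	int is_interrupt_thread;   /* klmirqd interrupt thread? */
	int is_aux_task;           /* auxiliary helper task? */
	const void *inh_task;      /* priority donor, or NULL */
	int pid;
};

/* returns 1 iff a beats b under the patch's tie-break ladder */
static int edf_tie_break(const struct rt_task *a, const struct rt_task *b)
{
	/* non-interrupt thread gets prio */
	if (!a->is_interrupt_thread && b->is_interrupt_thread)
		return 1;
	if (a->is_interrupt_thread != b->is_interrupt_thread)
		return 0;

	/* non-aux task gets prio */
	if (!a->is_aux_task && b->is_aux_task)
		return 1;
	if (a->is_aux_task != b->is_aux_task)
		return 0;

	/* same donor (or none on both sides): fall back to PID order */
	if (a->inh_task == b->inh_task)
		return a->pid < b->pid;

	/* the task withOUT the inherited priority wins */
	return b->inh_task != NULL;
}

The TODO kept in the patch notes that recursively comparing base priorities would be more principled than the PID fallback in the same-donor case.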
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 170c43fcc78e..5099f6eae537 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -869,7 +869,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 		}
 	}
 	else {
-		TRACE_TASK(t, "No change in effective priority (it is %d/%s). BUG?\n",
+		TRACE_TASK(t, "No change in effective priority (it is %s/%d).\n",
 			(new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
 			(new_max_eff_prio) ? new_max_eff_prio->pid : -1);
 		raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock);
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 1feb2fbe42bc..dd64211a1402 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -240,26 +240,6 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		sched = &per_cpu(cedf_cpu_entries, on_cpu);
 
 		BUG_ON(sched->linked == linked);
-#if 0
-		/* this should only happen if not linked already */
-		if (sched->linked == linked) {
-			printk(KERN_EMERG "%s/%d hit sched bug. is_aux = %d; has_aux = %d\n",
-				linked->comm, linked->pid, tsk_rt(linked)->is_aux_task, tsk_rt(linked)->has_aux_tasks);
-			printk(KERN_EMERG "eCPU: %d, eLinked: %s/%d, sCPU: %d, sched: %s/%d\n",
-				entry->cpu,
-				(entry->linked) ?
-					entry->linked->comm : "nil",
-				(entry->linked) ?
-					entry->linked->pid : -1,
-				sched->cpu,
-				(sched->linked) ?
-					sched->linked->comm : "nil",
-				(sched->linked) ?
-					sched->linked->pid : -1);
-			TRACE_TASK(linked, "LINK BUG!\n");
-			unlink(linked);
-		}
-#endif
 
 		/* If we are already scheduled on the CPU to which we
 		 * wanted to link, we don't need to do the swap --
@@ -295,11 +275,9 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
  */
 static noinline void unlink(struct task_struct* t)
 {
-	cpu_entry_t *entry;
-
 	if (t->rt_param.linked_on != NO_CPU) {
 		/* unlink */
-		entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
+		cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
 		t->rt_param.linked_on = NO_CPU;
 		link_task_to_cpu(NULL, entry);
 	} else if (is_queued(t)) {
@@ -338,7 +316,7 @@ static noinline void requeue(struct task_struct* task)
 
 	if (is_released(task, litmus_clock()))
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
+	if (unlikely(tsk_rt(task)->is_aux_task && task->state != TASK_RUNNING)) {
 		/* aux_task probably transitioned to real-time while it was blocked */
 		TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
 		unlink(task); /* really needed? */
@@ -874,6 +852,15 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			entry->linked->comm, entry->linked->pid);
 
+	if (tsk_rt(prev)->is_aux_task &&
+	    (prev->state == TASK_INTERRUPTIBLE) &&
+	    !blocks) {
+		TRACE_TASK(prev, "Deferring descheduling of aux task %s/%d.\n",
+			prev->comm, prev->pid);
+		next = prev; /* allow prev to continue. */
+		goto out_set_state;
+	}
+
 	/* Send the signal that the budget has been exhausted */
 	if (signal_budget)
 		send_sigbudget(entry->scheduled);
@@ -934,12 +921,17 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	}
+	else {
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
-		if (exists)
+		if (exists) {
 			next = prev;
+		}
+	}
+
+out_set_state:
 
 	sched_state_task_picked();
 	raw_spin_unlock(&cluster->cluster_lock);
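
The two cedf_schedule() hunks work as a pair: the early test catches an aux task that is TASK_INTERRUPTIBLE but did not actually block, keeps it scheduled with next = prev, and jumps over the unlink/relink decisions to the new out_set_state label. A reduced control-flow sketch under those assumptions follows (helper and field names are stand-ins, not the plugin's real interface; the constant value is illustrative):

#define TASK_INTERRUPTIBLE	0x01	/* illustrative value */

struct task {
	int is_aux_task;
	unsigned int state;
};

static void state_task_picked(void)
{
	/* stand-in for sched_state_task_picked() */
}

/* reduced sketch of the patched scheduling exit path */
static struct task *pick_next(struct task *prev, int blocks)
{
	struct task *next = NULL;

	if (prev && prev->is_aux_task &&
	    prev->state == TASK_INTERRUPTIBLE && !blocks) {
		next = prev;		/* allow prev to continue */
		goto out_set_state;	/* skip the relinking decisions */
	}

	/* ... linking / preemption decisions would set next here ... */

out_set_state:
	state_task_picked();
	return next;
}

int main(void)
{
	struct task aux = { 1, TASK_INTERRUPTIBLE };
	return pick_next(&aux, 0) == &aux ? 0 : 1; /* aux task stays put */
}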
@@ -988,6 +980,8 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* setup job params */
 	release_at(t, litmus_clock());
 
+	t->rt_param.linked_on = NO_CPU;
+
 	if (running) {
 		entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled);
@@ -1007,15 +1001,11 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	} else {
 		t->rt_param.scheduled_on = NO_CPU;
 	}
-	t->rt_param.linked_on = NO_CPU;
 
 	if (is_running(t)) {
 		cedf_job_arrival(t);
 	}
-	else {
-		TRACE("Deferred job arrival because %d is blocked.\n", t->pid);
-		unlink(t); /* needed?? */
-	}
+
 	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
@@ -1188,7 +1178,7 @@ static int __increase_priority_inheritance(struct task_struct* t,
 
 	/* If it is scheduled, then we need to reorder the CPU heap. */
 	if (linked_on != NO_CPU) {
-		TRACE_TASK(t, "%s: linked  on %d\n",
+		TRACE_TASK(t, "%s: linked on %d\n",
 			__FUNCTION__, linked_on);
 		/* Holder is scheduled; need to re-order CPUs.
 		 * We can't use heap_decrease() here since
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 76ff892122aa..e1654da1ea6c 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -33,10 +33,10 @@ void preempt_if_preemptable(struct task_struct* t, int cpu)
 
 	TRACE_CUR("preempt_if_preemptable: %s/%d\n",
 		(t) ? t->comm : "(nil)",
-		(t) ? t->pid : 0);
+		(t) ? t->pid : -1);
 
 	if (!t) {
-		TRACE_CUR("unconditionally reshcedule\n");
+		TRACE_CUR("unconditionally reschedule\n");
 		/* move non-real-time task out of the way */
 		reschedule = 1;
 	}
@@ -229,7 +229,6 @@ static int litmus_dummy_map_gpu_to_cpu(int gpu)
 }
 #endif
 
-
 /* The default scheduler plugin. It doesn't do anything and lets Linux do its
  * job.
  */
@@ -272,7 +271,6 @@ struct sched_plugin linux_sched_plugin = {
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	.map_gpu_to_cpu = litmus_dummy_map_gpu_to_cpu,
 #endif
-
 	.admit_task = litmus_dummy_admit_task
 };
 
@@ -335,7 +333,6 @@ int register_sched_plugin(struct sched_plugin* plugin)
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	CHECK(map_gpu_to_cpu);
 #endif
-
 	CHECK(admit_task);
 
 	if (!plugin->release_at)