author		Jonathan Herman <hermanjl@cs.unc.edu>	2011-10-09 18:42:07 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-10-09 18:42:07 -0400
commit		bc92369ba24f72b916384ed80d4381549c6537a7 (patch)
tree		9e4f0ae48a5e2d8d4fbb877011c63f89ff3bd6f7
parent		dd3b483c07497e34ad948890dc7cd871cd68dc7a (diff)

Fixed bug where finish switch could strand level-C tasks in neverland

-rw-r--r--	litmus/rt_domain.c	 4
-rw-r--r--	litmus/sched_mc.c	17
2 files changed, 15 insertions, 6 deletions
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 93f2a35fb29d..f242f33e990e 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -20,7 +20,7 @@
 #include <litmus/bheap.h>
 
 /* Uncomment when debugging timer races... */
-#if 1
+#if 0
 #define VTRACE_TASK TRACE_TASK
 #define VTRACE TRACE
 #else
@@ -404,7 +404,9 @@ void __add_release(rt_domain_t* rt, struct task_struct *task)
 static void pd_requeue(domain_t *dom, struct task_struct *task)
 {
 	rt_domain_t *domain = (rt_domain_t*)dom->data;
+
 	BUG_ON(!task || !is_realtime(task));
+	TRACE_TASK(task, "Requeueing");
 	BUG_ON(is_queued(task));
 	BUG_ON(get_task_domain(task) != dom);
 
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 277762709d3d..61eb961eda23 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -63,8 +63,10 @@ static int interrupt_cpu;
 #define is_global_task(t) (is_global(get_task_domain(t)))
 #define can_use(ce) \
 	((ce)->state == CS_ACTIVE || (ce->state == CS_ACTIVATE))
 #define can_requeue(t) \
-	(!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU)
+	((t)->rt_param.linked_on == NO_CPU && /* Not linked anywhere */ \
+	 !is_queued(t) && /* Not gonna be linked */ \
+	 (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU))
 #define entry_level(e) \
 	(((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
 #define crit_cpu(ce) \
@@ -288,11 +290,11 @@ static void job_arrival(struct task_struct *task)
 	TRACE_MC_TASK(task, "Job arriving\n");
 	BUG_ON(!task);
 
+	raw_spin_lock(dom->lock);
 	if (can_requeue(task)) {
-		raw_spin_lock(dom->lock);
+		BUG_ON(task->rt_param.linked_on != NO_CPU);
 		dom->requeue(dom, task);
 		check_for_preempt(dom);
-		raw_spin_unlock(dom->lock);
 	} else {
 		/* If a global task is scheduled on one cpu, it CANNOT
 		 * be requeued into a global domain. Another cpu might
@@ -302,6 +304,7 @@ static void job_arrival(struct task_struct *task)
 		 */
 		TRACE_MC_TASK(task, "Delayed arrival of scheduled task\n");
 	}
+	raw_spin_unlock(dom->lock);
 }
 
 /**
@@ -547,11 +550,15 @@ static void remove_from_all(struct task_struct* task)
 				update = 1;
 				link_task_to_cpu(entry, NULL);
 			}
+		} else {
+			TRACE_MC_TASK(task, "Unlinked before we got lock!");
 		}
 		if (update)
 			update_crit_levels(entry);
 		else
 			raw_spin_unlock(&entry->lock);
+	} else {
+		TRACE_MC_TASK(task, "Not linked to anything\n");
 	}
 
 	/* Ensure the task isn't returned by its domain */
@@ -874,7 +881,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	local_irq_save(flags);
 	raw_spin_lock(&entry->lock);
 
-	if (entry->scheduled) {
+	if (entry->scheduled && entry->scheduled != prev) {
 		TRACE_TASK(entry->scheduled, " and prev is %d\n", prev->pid);
 	}
 
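
For readers skimming the patch, the following is a rough user-space model of the two changes to job_arrival(): the strengthened can_requeue() test (the task must not be linked to any CPU, must not already be queued, and a global task must no longer be scheduled anywhere) and the widened lock scope (dom->lock is now taken before the test, so the check and the requeue or the delayed-arrival decision happen atomically). This is only an illustrative sketch under stated assumptions, not LITMUS^RT code: mock_task, mock_domain and the pthread mutex are hypothetical stand-ins for rt_param.linked_on / scheduled_on, is_queued(), domain_t and raw_spin_lock.

/* Hypothetical model of the requeue rule in this commit -- not kernel code.
 * Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NO_CPU (-1)

struct mock_task {
	int linked_on;     /* CPU the task is linked to, or NO_CPU         */
	int scheduled_on;  /* CPU still running the task, or NO_CPU        */
	bool queued;       /* already sitting in a ready/release queue     */
	bool global;       /* belongs to a globally scheduled criticality  */
};

struct mock_domain {
	pthread_mutex_t lock;  /* stands in for dom->lock */
};

/* Mirrors the strengthened can_requeue(): not linked anywhere, not already
 * queued, and (for global tasks) fully descheduled from its previous CPU. */
static bool can_requeue(const struct mock_task *t)
{
	return t->linked_on == NO_CPU &&
	       !t->queued &&
	       (!t->global || t->scheduled_on == NO_CPU);
}

/* Models the reordered job_arrival(): the domain lock is taken before the
 * can_requeue() test, so the test and the requeue (or the decision to delay
 * the arrival) are atomic with respect to other CPUs. */
static void job_arrival(struct mock_domain *dom, struct mock_task *t)
{
	pthread_mutex_lock(&dom->lock);
	if (can_requeue(t)) {
		t->queued = true;                 /* dom->requeue(dom, task) */
		printf("task requeued\n");        /* check_for_preempt(dom)  */
	} else {
		printf("delayed arrival of scheduled task\n");
	}
	pthread_mutex_unlock(&dom->lock);
}

int main(void)
{
	struct mock_domain dom = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct mock_task t = {
		.linked_on = NO_CPU, .scheduled_on = 2,
		.queued = false, .global = true
	};

	job_arrival(&dom, &t);          /* delayed: still scheduled on CPU 2 */
	t.scheduled_on = NO_CPU;
	job_arrival(&dom, &t);          /* now safe to requeue               */
	return 0;
}

In this model, as in the patch, a global task that is still scheduled on some CPU is never put back into a queue where another CPU could pick it up concurrently; it simply waits for the next arrival attempt after it has been fully descheduled.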