Diffstat (limited to 'litmus')

 litmus/ce_domain.c   | 22 +++++++++++++++++-----
 litmus/event_group.c | 21 +++++++++++++--------
 litmus/rt_domain.c   | 13 ++++++++++---
 litmus/sched_mc.c    | 36 +++++++++++++++-------------------
 4 files changed, 57 insertions(+), 35 deletions(-)
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index c3ddc9dd63ad..5e1f7ccc000c 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -30,6 +30,8 @@ void ce_requeue(domain_t *dom, struct task_struct *ts)
 		      "expected_job: %3u\n",
 		      asleep, just_finished, expected_job);
 
+	tsk_mc_data(ts)->mc_task.lvl_a_eligible = 1;
+
 	/* When coming from job completion, the task will be asleep. */
 	if (asleep && just_finished < expected_job) {
 		TRACE_MC_TASK(ts, "appears behind\n");
@@ -41,18 +43,27 @@ void ce_requeue(domain_t *dom, struct task_struct *ts)
 	}
 }
 
 /*
+ * ce_remove - mark a task ineligible to run at level A.
+ */
+void ce_remove(domain_t *dom, struct task_struct *ts)
+{
+	tsk_mc_data(ts)->mc_task.lvl_a_eligible = 0;
+}
+
+/*
  * ce_take_ready and ce_peek_ready
  */
 struct task_struct* ce_peek_and_take_ready(domain_t *dom)
 {
-	struct task_struct *ret = NULL;
 	const struct ce_dom_data *ce_data = dom->data;
-	const int exists = NULL != ce_data->should_schedule;
-	const int blocked = exists && !is_running(ce_data->should_schedule);
+	struct task_struct *ret = NULL, *sched = ce_data->should_schedule;
+	const int exists = NULL != sched;
+	const int blocked = exists && !is_running(sched);
+	const int elig = exists && tsk_mc_data(sched)->mc_task.lvl_a_eligible;
 
 	/* Return the task we should schedule if it is not blocked or sleeping. */
-	if (exists && !blocked)
-		ret = ce_data->should_schedule;
+	if (exists && !blocked && elig)
+		ret = sched;
 	return ret;
 }
 
@@ -78,6 +89,7 @@ void ce_domain_init(domain_t *dom,
 	domain_init(dom, lock, requeue, peek_ready, take_ready, preempt_needed,
 		    task_prio);
 	dom->data = dom_data;
+	dom->remove = ce_remove;
 	dom_data->cpu = cpu;
 #ifdef CONFIG_MERGE_TIMERS
 	init_event(&dom_data->event, CRIT_LEVEL_A, ce_timer_callback,
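Note: taken together, the ce_requeue() and ce_remove() changes form an eligibility latch around should_schedule: requeueing sets lvl_a_eligible, removal clears it, and ce_peek_and_take_ready() now refuses to hand back a task whose bit is clear even while the cyclic-executive table still points at it. A minimal stand-alone sketch of the latch (the struct below is illustrative, not the real ce_dom_data):

/* Illustrative model of the level-A eligibility latch added above; the
 * real flag lives in tsk_mc_data(ts)->mc_task.lvl_a_eligible. */
struct ce_latch_demo {
	struct task_struct *should_schedule;	/* chosen by the CE table */
	int eligible;				/* 1 after requeue, 0 after remove */
};

static struct task_struct *latch_peek(struct ce_latch_demo *d)
{
	/* Offer the table's choice only while it is runnable and eligible. */
	if (d->should_schedule && is_running(d->should_schedule) && d->eligible)
		return d->should_schedule;
	return NULL;
}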
diff --git a/litmus/event_group.c b/litmus/event_group.c
index daf964fbb8cc..40c2f9bf2d18 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -71,18 +71,19 @@ void insert_event(struct event_list *el, struct rt_event *e)
 		queued = list_entry(pos, struct rt_event, list);
 		last = pos;
 		if (e->prio < queued->prio) {
-			VTRACE("Inserting priority %d 0x%p before %d 0x%p "
-			       "in 0x%p, pos 0x%p\n", e->prio, &e->list,
+			VTRACE("Inserting priority %d event 0x%p before %d 0x%p "
+			       "in 0x%p, pos 0x%p\n", e->prio, e,
 			       queued->prio, &queued->list, el, pos);
 			BUG_ON(!list_empty(&e->list));
 			list_add_tail(&e->list, pos);
 			return;
 		}
 	}
-	VTRACE("Inserting priority %d 0x%p at end of 0x%p, last 0x%p\n",
-	       e->prio, &el->list, el, last);
+	VTRACE("Inserting priority %d event 0x%p at end of 0x%p, last 0x%p\n",
+	       e->prio, e, el, last);
 	BUG_ON(!list_empty(&e->list));
 	list_add(&e->list, (last) ? last : pos);
+	VTRACE("Singular? %d\n", list_is_singular(&el->events));
 }
 
 /*
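Note: the `(last) ? last : pos` splice in insert_event() is a tail insertion: after a full traversal `last` is the final node, and on an empty list (`last` still NULL, assuming it starts NULL) `pos` has come back around to the head, so both arms append the event at the end. A hypothetical behavior-equivalent form using the plain list API:

/* Hypothetical simplification, equivalent under the assumption that
 * 'last' starts out NULL: append when no higher-priority position
 * (numerically smaller prio) was found during the walk. */
list_add_tail(&e->list, &el->events);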
@@ -100,7 +101,7 @@ static struct event_list* get_event_list(struct event_group *group,
 	unsigned int slot = time2slot(fire);
 	int remaining = 300;
 
-	VTRACE("Getting list for %llu, event 0x%p\n", fire, e);
+	VTRACE("Getting list for time %llu, event 0x%p\n", fire, e);
 
 	/* Initialize pos for the case that the list is empty */
 	pos = group->event_queue[slot].next;
@@ -145,7 +146,7 @@ static struct event_list* get_event_list(struct event_group *group,
 static void reinit_event_list(struct rt_event *e)
 {
 	struct event_list *el = e->event_list;
-	VTRACE("Reinitting 0x%p for event 0x%p\n", el, e);
+	VTRACE("Reinitting list 0x%p for event 0x%p\n", el, e);
 	BUG_ON(hrtimer_try_to_cancel(&el->timer) == 1);
 	INIT_LIST_HEAD(&el->events);
 	atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE);
@@ -213,13 +214,14 @@ void cancel_event(struct rt_event *e)
 	/* If our event_list contains any events, it is in use */
 	raw_spin_lock(&group->queue_lock);
 	if (!list_empty(&e->event_list->events)) {
+		VTRACE("List 0x%p is not empty\n", e->event_list);
 
 		/* If our event_list contains events, we are the first element
 		 * in that list. If there is anyone after us in the list, then
 		 * swap our list with theirs so that the event_list can still
 		 * trigger the queued events.
 		 */
-		if (!list_is_singular(&e->list)) {
+		if (!list_is_singular(&e->event_list->events)) {
 			swap = list_entry(e->list.next, struct rt_event, list);
 			VTRACE("Swapping with event 0x%p of priority %d\n",
 			       swap, swap->prio);
@@ -230,8 +232,11 @@ void cancel_event(struct rt_event *e)
 		}
 
 		/* Disable the event_list */
+		atomic_set(&e->event_list->info.state, HRTIMER_START_ON_INACTIVE);
 		hrtimer_try_to_cancel(&e->event_list->timer);
 		list_del_init(&e->event_list->list);
+	} else {
+		VTRACE("List 0x%p is empty\n", e->event_list);
 	}
 	list_del_init(&e->list);
 	raw_spin_unlock(&group->queue_lock);
@@ -248,7 +253,7 @@ struct event_list* event_list_alloc(int gfp_flags)
 		INIT_LIST_HEAD(&el->list);
 		el->timer.function = on_timer;
 	} else {
-		VTRACE("Failed to allocate event list!");
+		VTRACE("Failed to allocate event list!\n");
 		printk(KERN_CRIT "Failed to allocate event list.\n");
 		BUG();
 	}
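Note: the list_is_singular() fix above is the substantive change in this file. The hand-off decision must examine the shared queue head, e->event_list->events, not the event's own link node, which after an earlier swap may be threaded onto a different event_list entirely. A reduced sketch of the corrected test (the demo_ name is illustrative):

/* Illustrative: an event being cancelled hands its event_list to a
 * successor only when at least one other event shares the queue. */
static int demo_needs_handoff(struct rt_event *e)
{
	struct list_head *q = &e->event_list->events;

	return !list_empty(q) && !list_is_singular(q);
}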
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index ffb3cab9cffd..93f2a35fb29d 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -20,7 +20,7 @@
 #include <litmus/bheap.h>
 
 /* Uncomment when debugging timer races... */
-#if 0
+#if 1
 #define VTRACE_TASK TRACE_TASK
 #define VTRACE TRACE
 #else
@@ -293,8 +293,7 @@ static void setup_release(rt_domain_t *_rt)
 #else
 			arm_release_timer(rh);
 #endif
-		} else
-			VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
+		}
 	}
 }
 
@@ -439,6 +438,13 @@ static struct task_struct* pd_peek_ready(domain_t *dom)
 	return __next_ready((rt_domain_t*)dom->data);
 }
 
+static void pd_remove(domain_t *dom, struct task_struct *task)
+{
+	if (is_queued(task)) {
+		remove((rt_domain_t*)dom->data, task);
+	}
+}
+
 /* pd_domain_init - create a generic domain wrapper for an rt_domain
  */
 void pd_domain_init(domain_t *dom,
@@ -453,5 +459,6 @@ void pd_domain_init(domain_t *dom,
 	domain_init(dom, &domain->ready_lock,
 		    pd_requeue, pd_peek_ready, pd_take_ready,
 		    preempt_needed, priority);
+	dom->remove = pd_remove;
 	dom->data = domain;
 }
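Note: both pd_domain_init() here and ce_domain_init() in ce_domain.c assign dom->remove after the generic domain_init() call, which implies domain_t has grown an optional remove slot alongside its other callbacks. A sketch of the assumed layout (only lock, data, and remove are visible in this patch; the remaining fields are inferred from the domain_init() call sites):

/* Assumed shape of the generic domain wrapper. */
typedef struct domain {
	raw_spinlock_t *lock;	/* guards the ready queue */
	void *data;		/* rt_domain_t or ce_dom_data */
	void (*requeue)(struct domain*, struct task_struct*);
	struct task_struct* (*peek_ready)(struct domain*);
	struct task_struct* (*take_ready)(struct domain*);
	int (*preempt_needed)(struct domain*, struct task_struct*);
	/* new in this change: forget a task so the domain stops returning it */
	void (*remove)(struct domain*, struct task_struct*);
} domain_t;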
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index b2a9ca205be4..f360abf34035 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -183,7 +183,7 @@ static void update_ghost_time(struct task_struct *p)
 static inline void update_crit_position(struct crit_entry *ce)
 {
 	struct bheap *heap;
-	if (is_global(ce->domain)) {
+	if (is_global(ce->domain) && ce->usable) {
 		heap = domain_data(ce->domain)->heap;
 		BUG_ON(!heap);
 		BUG_ON(!bheap_node_in_heap(ce->node));
@@ -451,9 +451,18 @@ static void check_for_preempt(struct domain *dom)
 		entry = crit_cpu(ce);
 		preempted = 0;
 		raw_spin_lock(&entry->lock);
-		if (ce->usable && dom->preempt_needed(dom, ce->linked)){
+
+		if (!ce->usable) {
+			TRACE_CRIT_ENTRY(ce, "Removing\n");
+			bheap_delete(cpu_lower_prio,
+				     domain_data(dom)->heap, ce->node);
+			continue;
+		}
+		if (dom->preempt_needed(dom, ce->linked)){
 			preempted = 1;
 			preempt(dom, ce);
+		} else {
+			TRACE_CRIT_ENTRY(ce, "Stopped global check\n");
 		}
 		raw_spin_unlock(&entry->lock);
 	}
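Note: one thing to watch in the new `!ce->usable` branch: `continue` leaves the loop body after raw_spin_lock(&entry->lock) but before the unlock at the bottom, so unless the surrounding loop (not visible in this hunk) handles it, the lock should be dropped first. A sketch of the intended discipline, with a hypothetical iterator standing in for the unseen loop header:

/* Sketch only; for_each_global_ce() is a stand-in for the real loop. */
for_each_global_ce(dom, ce) {
	entry = crit_cpu(ce);
	raw_spin_lock(&entry->lock);
	if (!ce->usable) {
		bheap_delete(cpu_lower_prio, domain_data(dom)->heap, ce->node);
		raw_spin_unlock(&entry->lock);	/* drop before continuing */
		continue;
	}
	/* ... preemption check ... */
	raw_spin_unlock(&entry->lock);
}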
@@ -486,10 +495,10 @@ static void remove_from_all(struct task_struct* task)
 
 	raw_spin_lock(dom->lock);
 
+	/* Remove the task from any CPU state */
 	if (task->rt_param.linked_on != NO_CPU) {
 		entry = &per_cpu(cpus, task->rt_param.linked_on);
 		raw_spin_lock(&entry->lock);
-
 		/* Unlink only if task is still linked post lock */
 		ce = &entry->crit_entries[tsk_mc_crit(task)];
 		if (task->rt_param.linked_on != NO_CPU) {
@@ -501,23 +510,15 @@ static void remove_from_all(struct task_struct* task)
 				link_task_to_cpu(entry, NULL);
 			}
 		}
-
 		if (update)
 			update_crit_levels(entry);
 		else
 			raw_spin_unlock(&entry->lock);
 	}
 
-	if (is_queued(task)) {
-		/* This is an interesting situation: t is scheduled,
-		 * but was just recently unlinked. It cannot be
-		 * linked anywhere else (because then it would have
-		 * been relinked to this CPU), thus it must be in some
-		 * queue. We must remove it from the list in this
-		 * case.
-		 */
-		remove((rt_domain_t*)get_task_domain(task)->data, task);
-	}
+	/* Ensure the task isn't returned by its domain */
+	dom->remove(dom, task);
+
 	BUG_ON(is_queued(task));
 	raw_spin_unlock(dom->lock);
 }
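Note: remove_from_all() now delegates queue removal to dom->remove(), keeping only the CPU-state cleanup, which uses a classic double-checked pattern: read the racy linked_on field, take the lock that stabilizes it, then re-check before unlinking. The pattern in isolation (simplified from the hunk above; the real code may call update_crit_levels() instead of the plain unlock):

/* The first read of linked_on is racy, so it is confirmed under
 * entry->lock before acting on it. */
if (task->rt_param.linked_on != NO_CPU) {
	entry = &per_cpu(cpus, task->rt_param.linked_on);
	raw_spin_lock(&entry->lock);
	if (task->rt_param.linked_on != NO_CPU)	/* still linked? */
		link_task_to_cpu(entry, NULL);	/* safe to unlink now */
	raw_spin_unlock(&entry->lock);
}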
@@ -853,12 +854,9 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
 	if (exists) {
 		entry->scheduled->rt_param.scheduled_on = NO_CPU;
-		TRACE(TS
-		      " blocks:%d out_of_time:%d sleep:%d preempt:%d "
-		      "state:%d sig:%d global:%d\n", TA(prev),
-		      blocks, out_of_time, sleep, preempt,
-		      prev->state, signal_pending(prev), global);
 	}
+	TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d\n",
+	      TA(prev), blocks, out_of_time, sleep, preempt);
 	raw_spin_unlock(&entry->lock);
 
 