From 84df1f63c6a9967dc8a6c715292e92ef9b35dfa6 Mon Sep 17 00:00:00 2001
From: Jeremy Erickson
Date: Thu, 20 Jan 2011 18:41:23 -0500
Subject: Numerous fixes

---
 litmus/sched_mc.c | 53 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 30 insertions(+), 23 deletions(-)

diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 92b847495673..6a6d4c52065c 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -162,11 +162,11 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
         struct task_struct *first_active, *second_active;
         first_active = first->linked;
         second_active = second->linked;
-        if (first->ghost_jobs[crit]){
-                first->active = first->ghost_jobs[crit];
+        if (first->ghost_tasks[crit]){
+                first_active = first->ghost_tasks[crit];
         }
-        if (second->ghost_jobs[crit]){
-                second->active = second->ghost_jobs[crit];
+        if (second->ghost_tasks[crit]){
+                second_active = second->ghost_tasks[crit];
         }
         return mc_edf_higher_prio(first_active, second_active);
 }
@@ -178,7 +178,7 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
 static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
                                     cpu_entry_t* entry)
 {
-        task_struct *active_task;
+        struct task_struct *active_task;
 
         /* we need the read lock for edf_ready_queue */
         /* no need to preempt if there is nothing pending */
@@ -286,9 +286,9 @@ static void update_cpu_position(cpu_entry_t *entry)
         if (likely(bheap_node_in_heap(entry->hn_c)))
                 bheap_delete(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
         if (likely(bheap_node_in_heap(entry->hn_d)))
-                bheap_delete(cpu_lower_prio_d, &mc_dpu_heap_d, entry->hn_d);
-        bheap_insert(cpu_lower_prio, &mc_cpu_heap_c, entry->hn_c);
-        bheap_insert(cpu_lower_prio, &mc_cpu_heap_d, entry->hn_d);
+                bheap_delete(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
+        bheap_insert(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
+        bheap_insert(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
 }
 
 /* caller must hold global lock
@@ -320,7 +320,7 @@ static noinline void job_completion(struct task_struct *t, int forced);
 /* update_ghost_time - Do time accounting for a ghost job.
  * Updates ghost budget and handles expired ghost budget.
  * Called from unlink(), mc_tick().
- * TODO: LOCK ACCOUNTING SHOULD BE CONSIDERED
+ * Caller holds global lock.
  */
 static void update_ghost_time(struct task_struct *p)
 {
@@ -534,7 +534,9 @@ static void preempt(cpu_entry_t *entry)
  */
 static noinline void requeue(struct task_struct* task)
 {
-        BUG_ON(!task || !is_realtime(task));
+/*      BUG_ON(!task || !is_realtime(task));*/
+        BUG_ON(!task);
+        BUG_ON(!is_realtime(task));
         /* sanity check before insertion */
         BUG_ON(is_queued(task));
 
@@ -558,8 +560,8 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
                            crit)
                         requeue(cpu->linked);
                 /* Can change ghost task at our level as well.
                  */
-                if (cpu->ghost_jobs[crit])
-                        requeue(cpu->ghost_jobs[crit]);
+                if (cpu->ghost_tasks[crit])
+                        requeue(cpu->ghost_tasks[crit]);
         }
         else{
                 /* Changing linked tasks could affect both real and ghost
                  * tasks at multiple levels
@@ -567,8 +569,8 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
                 if (cpu->linked)
                         requeue(cpu->linked);
                 for (i = crit; i <= CRIT_LEVEL_D; i++) {
-                        if (cpu->ghost_jobs[i])
-                                requeue(cpu->ghost_jobs[i]);
+                        if (cpu->ghost_tasks[i])
+                                requeue(cpu->ghost_tasks[i]);
                 }
         }
         link_task_to_cpu(task, cpu);
@@ -580,7 +582,7 @@ static void check_for_c_preemptions(rt_domain_t *dom){
         cpu_entry_t* last;
         for (last = lowest_prio_cpu_c();
              mc_edf_preemption_needed(dom, CRIT_LEVEL_C,
-                                      last->linked);
+                                      last);
              last = lowest_prio_cpu_c()) {
                 prepare_preemption(dom, last, CRIT_LEVEL_C);
         }
@@ -590,20 +592,20 @@ static void check_for_d_preemptions(rt_domain_t *dom){
         cpu_entry_t* last;
         for (last = lowest_prio_cpu_d();
              mc_edf_preemption_needed(dom, CRIT_LEVEL_D,
-                                      last->linked);
+                                      last);
              last = lowest_prio_cpu_d()) {
                 prepare_preemption(dom, last, CRIT_LEVEL_D);
         }
 }
 
 static void check_for_a_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
-        if (mc_edf_preemption_needed(dom, CRIT_LEVEL_A, cpu->linked)) {
+        if (mc_edf_preemption_needed(dom, CRIT_LEVEL_A, cpu)) {
                 prepare_preemption(dom, cpu, CRIT_LEVEL_A);
         }
 }
 
 static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
-        if (mc_edf_preemption_needed(dom, CRIT_LEVEL_B, cpu->linked)) {
+        if (mc_edf_preemption_needed(dom, CRIT_LEVEL_B, cpu)) {
                 prepare_preemption(dom, cpu, CRIT_LEVEL_B);
         }
 }
@@ -730,13 +732,15 @@ static noinline void job_completion(struct task_struct *t, int forced)
  * checks whether the current task has expired and checks
  * whether we need to preempt it if it has not expired
  * Called from LITMUS core
- * Does not use locks
+ * Locks when calling update_ghost_time(t)
  * Just sets reschedule flags on task and CPU and request_exit_np flag on task
  */
 static void mc_tick(struct task_struct* t)
 {
         if (is_ghost(t)) {
+                raw_spin_lock_irqsave(&global_lock, flags);
                 update_ghost_time(t);
+                raw_spin_unlock_irqrestore(&global_lock, flags);
         }
         if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
                 if (!is_np(t)) {
@@ -897,7 +901,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
                 ready_crit = CRIT_LEVEL_D + 1;
         }
         for (i = ready_crit; i <= CRIT_LEVEL_D; i++) {
-                requeue(entry->ghost_tasks[i]);
+                if (entry->ghost_tasks[i])
+                        requeue(entry->ghost_tasks[i]);
         }
         link_task_to_cpu(ready_task, entry);
         if (ready_task)
@@ -1090,14 +1095,16 @@ static long mc_activate_plugin(void)
         int cpu;
         cpu_entry_t *entry;
 
-        bheap_init(&mc_cpu_heap);
+        bheap_init(&mc_cpu_heap_c);
+        bheap_init(&mc_cpu_heap_d);
 #ifdef CONFIG_RELEASE_MASTER
         mc.release_master = atomic_read(&release_master_cpu);
 #endif
 
         for_each_online_cpu(cpu) {
                 entry = &per_cpu(mc_cpu_entries, cpu);
-                bheap_node_init(&entry->hn, entry);
+                bheap_node_init(&entry->hn_c, entry);
+                bheap_node_init(&entry->hn_d, entry);
                 atomic_set(&entry->will_schedule, 0);
                 entry->linked = NULL;
                 entry->scheduled = NULL;
@@ -1135,7 +1142,7 @@ static int __init init_mc(void)
 {
         int cpu;
         int i;
-        cpu_entry_t *entry_c, *entry_d;
+        cpu_entry_t *entry;
 
         bheap_init(&mc_cpu_heap_c);
         bheap_init(&mc_cpu_heap_d);
--
cgit v1.2.2
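
For context on the mc_tick() hunk above: raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() is the standard kernel pairing for taking a raw spinlock while saving and restoring the local interrupt state, and the saved state has to live in a local `unsigned long flags` variable in the function that takes the lock. The following is a minimal sketch, not part of the patch, of how the ghost-accounting section of mc_tick() is expected to read with the new locking; is_ghost(), update_ghost_time(), global_lock, and the function signature are the patch's own symbols, and the rest of the function body is elided.

```c
static void mc_tick(struct task_struct* t)
{
	unsigned long flags;	/* holds the saved interrupt state */

	if (is_ghost(t)) {
		/* Ghost budget accounting touches shared scheduler state,
		 * so serialize against other CPUs with IRQs disabled. */
		raw_spin_lock_irqsave(&global_lock, flags);
		update_ghost_time(t);
		raw_spin_unlock_irqrestore(&global_lock, flags);
	}

	/* ... budget-enforcement checks from the original function ... */
}
```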