author:    Jeremy Erickson <jerickso@cs.unc.edu>  2011-01-20 18:41:23 -0500
committer: Jeremy Erickson <jerickso@cs.unc.edu>  2011-01-20 18:41:23 -0500
commit:    84df1f63c6a9967dc8a6c715292e92ef9b35dfa6
tree:      d5b30a5725028a8f6e4f03834757e0dbacde4222
parent:    d0b9fb8bdbc6a6cfefd458b9011e21a076375aa8
Numerous fixes
 litmus/sched_mc.c | 53
 1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 92b847495673..6a6d4c52065c 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -162,11 +162,11 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
 	struct task_struct *first_active, *second_active;
 	first_active = first->linked;
 	second_active = second->linked;
-	if (first->ghost_jobs[crit]){
-		first->active = first->ghost_jobs[crit];
+	if (first->ghost_tasks[crit]){
+		first_active = first->ghost_tasks[crit];
 	}
-	if (second->ghost_jobs[crit]){
-		second->active = second->ghost_jobs[crit];
+	if (second->ghost_tasks[crit]){
+		second_active = second->ghost_tasks[crit];
 	}
 	return mc_edf_higher_prio(first_active, second_active);
 }
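Review note: the old code stored the ghost task into `first->active` / `second->active` rather than the locals `first_active` / `second_active` that the comparison actually reads, so ghost tasks never influenced the result; the fix also settles the `ghost_jobs` vs. `ghost_tasks` naming. For orientation, the per-CPU state this patch keeps touching plausibly looks like the sketch below, reconstructed from usage in these hunks rather than from the real header (field order and any omitted members are guesses):

	/* Hypothetical reconstruction of cpu_entry_t from this patch's usage. */
	typedef struct {
		struct task_struct *linked;     /* task logically assigned to this CPU */
		struct task_struct *scheduled;  /* task actually running on this CPU */
		/* one ghost task per criticality level, NULL when absent */
		struct task_struct *ghost_tasks[CRIT_LEVEL_D + 1];
		struct bheap_node *hn_c;        /* this CPU's node in the level-C heap */
		struct bheap_node *hn_d;        /* this CPU's node in the level-D heap */
		atomic_t will_schedule;
	} cpu_entry_t;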
@@ -178,7 +178,7 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
 static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
 				    cpu_entry_t* entry)
 {
-	task_struct *active_task;
+	struct task_struct *active_task;
 
 	/* we need the read lock for edf_ready_queue */
 	/* no need to preempt if there is nothing pending */
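Review note: the one-word change above is a compile fix; C, unlike C++, never lets a bare struct tag act as a type name:

	struct task_struct *active_task;  /* valid C */
	task_struct *active_task;         /* error: unknown type name 'task_struct' */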
@@ -286,9 +286,9 @@ static void update_cpu_position(cpu_entry_t *entry)
 	if (likely(bheap_node_in_heap(entry->hn_c)))
 		bheap_delete(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
 	if (likely(bheap_node_in_heap(entry->hn_d)))
-		bheap_delete(cpu_lower_prio_d, &mc_dpu_heap_d, entry->hn_d);
-	bheap_insert(cpu_lower_prio, &mc_cpu_heap_c, entry->hn_c);
-	bheap_insert(cpu_lower_prio, &mc_cpu_heap_d, entry->hn_d);
+		bheap_delete(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
+	bheap_insert(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
+	bheap_insert(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
 }
 
 /* caller must hold global lock
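Review note: two copy-paste slips are fixed here: a misspelled heap name (`mc_dpu_heap_d`) and two inserts that paired the split C/D heaps with the pre-split comparator `cpu_lower_prio`. Since bheap orders nodes with whatever comparator each call supplies, mixing comparators on one heap can leave it silently mis-ordered; the invariant the fixed code restores is that every operation on a heap uses that heap's own comparator:

	/* Illustrative pairing, mirroring the fixed calls above. */
	bheap_delete(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
	bheap_insert(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
	bheap_delete(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
	bheap_insert(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);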
@@ -320,7 +320,7 @@ static noinline void job_completion(struct task_struct *t, int forced);
 /* update_ghost_time - Do time accounting for a ghost job.
  * Updates ghost budget and handles expired ghost budget.
  * Called from unlink(), mc_tick().
- * TODO: LOCK ACCOUNTING SHOULD BE CONSIDERED
+ * Caller holds global lock.
  */
 static void update_ghost_time(struct task_struct *p)
 {
@@ -534,7 +534,9 @@ static void preempt(cpu_entry_t *entry)
  */
 static noinline void requeue(struct task_struct* task)
 {
-	BUG_ON(!task || !is_realtime(task));
+	/* BUG_ON(!task || !is_realtime(task));*/
+	BUG_ON(!task);
+	BUG_ON(!is_realtime(task));
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
 
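Review note: splitting the compound assertion changes diagnostics, not behavior; a BUG() report pinpoints only file and line, so separate assertions reveal which invariant failed:

	BUG_ON(!task);               /* oops here: a NULL task was requeued */
	BUG_ON(!is_realtime(task));  /* oops here: a non-real-time task reached the plugin */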
@@ -558,8 +560,8 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		    crit)
 			requeue(cpu->linked);
 		/* Can change ghost task at our level as well. */
-		if (cpu->ghost_jobs[crit])
-			requeue(cpu->ghost_jobs[crit]);
+		if (cpu->ghost_tasks[crit])
+			requeue(cpu->ghost_tasks[crit]);
 	}
 	else{
 		/* Changing linked tasks could affect both real and ghost
@@ -567,8 +569,8 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		if (cpu->linked)
 			requeue(cpu->linked);
 		for (i = crit; i <= CRIT_LEVEL_D; i++) {
-			if (cpu->ghost_jobs[i])
-				requeue(cpu->ghost_jobs[i]);
+			if (cpu->ghost_tasks[i])
+				requeue(cpu->ghost_tasks[i]);
 		}
 	}
 	link_task_to_cpu(task, cpu);
@@ -580,7 +582,7 @@ static void check_for_c_preemptions(rt_domain_t *dom){
 	cpu_entry_t* last;
 	for (last = lowest_prio_cpu_c();
 	     mc_edf_preemption_needed(dom, CRIT_LEVEL_C,
-				      last->linked);
+				      last);
 	     last = lowest_prio_cpu_c()) {
 		prepare_preemption(dom, last, CRIT_LEVEL_C);
 	}
@@ -590,20 +592,20 @@ static void check_for_d_preemptions(rt_domain_t *dom){
 	cpu_entry_t* last;
 	for (last = lowest_prio_cpu_d();
 	     mc_edf_preemption_needed(dom, CRIT_LEVEL_D,
-				      last->linked);
+				      last);
 	     last = lowest_prio_cpu_d()) {
 		prepare_preemption(dom, last, CRIT_LEVEL_D);
 	}
 }
 
 static void check_for_a_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
-	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_A, cpu->linked)) {
+	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_A, cpu)) {
 		prepare_preemption(dom, cpu, CRIT_LEVEL_A);
 	}
 }
 
 static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
-	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_B, cpu->linked)) {
+	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_B, cpu)) {
 		prepare_preemption(dom, cpu, CRIT_LEVEL_B);
 	}
 }
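Review note: these four call sites previously passed `last->linked` / `cpu->linked` (a `struct task_struct *`) to a parameter declared `cpu_entry_t *entry` (see the -178,7 hunk), so the callers are being brought in line with the signature. Handing over the whole entry also lets the predicate weigh the CPU's ghost task, presumably mirroring `mc_edf_entry_higher_prio()`; a hypothetical sketch of that shape, not the function's actual body (and assuming `__next_ready()` is the usual rt_domain helper that peeks at the head of the ready queue):

	static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
					    cpu_entry_t* entry)
	{
		/* the effectively active task is the ghost task at this
		 * criticality level if one exists, else the linked task */
		struct task_struct *active_task = entry->linked;

		if (entry->ghost_tasks[crit])
			active_task = entry->ghost_tasks[crit];
		/* preempt if the best pending job beats the active one */
		return mc_edf_higher_prio(__next_ready(rt), active_task);
	}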
@@ -730,13 +732,15 @@ static noinline void job_completion(struct task_struct *t, int forced)
  * checks whether the current task has expired and checks
  * whether we need to preempt it if it has not expired
  * Called from LITMUS core
- * Does not use locks
+ * Locks when calling update_ghost_time(t)
  * Just sets reschedule flags on task and CPU and request_exit_np flag on task
  */
 static void mc_tick(struct task_struct* t)
 {
 	if (is_ghost(t)) {
+		raw_spin_lock_irqsave(&global_lock, flags);
 		update_ghost_time(t);
+		raw_spin_unlock_irqrestore(&global_lock, flags);
 	}
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
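Review note: `mc_tick()` runs in tick (interrupt) context, which is why the irqsave variant is the right lock here, and the `flags` word it saves into must be declared in the function; that declaration sits outside this hunk's context lines, so it cannot be confirmed from the patch. Under that assumption, the ghost branch would read:

	static void mc_tick(struct task_struct* t)
	{
		unsigned long flags;  /* assumed: required by the irqsave pair */

		if (is_ghost(t)) {
			/* update_ghost_time() now documents that its caller
			 * holds the global lock (see the -320,7 hunk) */
			raw_spin_lock_irqsave(&global_lock, flags);
			update_ghost_time(t);
			raw_spin_unlock_irqrestore(&global_lock, flags);
		}
		/* ... budget-exhaustion handling as in the hunk above ... */
	}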
@@ -897,7 +901,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		ready_crit = CRIT_LEVEL_D + 1;
 	}
 	for (i = ready_crit; i <= CRIT_LEVEL_D; i++) {
-		requeue(entry->ghost_tasks[i]);
+		if (entry->ghost_tasks[i])
+			requeue(entry->ghost_tasks[i]);
 	}
 	link_task_to_cpu(ready_task, entry);
 	if (ready_task)
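Review note: this guard pairs with requeue()'s new `BUG_ON(!task)`: vacant ghost slots are the common case, so unconditionally requeuing every `entry->ghost_tasks[i]` would trip that assertion on the first empty level. The same check-then-requeue pattern already appears in prepare_preemption() above.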
@@ -1090,14 +1095,16 @@ static long mc_activate_plugin(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	bheap_init(&mc_cpu_heap);
+	bheap_init(&mc_cpu_heap_c);
+	bheap_init(&mc_cpu_heap_d);
 #ifdef CONFIG_RELEASE_MASTER
 	mc.release_master = atomic_read(&release_master_cpu);
 #endif
 
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(mc_cpu_entries, cpu);
-		bheap_node_init(&entry->hn, entry);
+		bheap_node_init(&entry->hn_c, entry);
+		bheap_node_init(&entry->hn_d, entry);
 		atomic_set(&entry->will_schedule, 0);
 		entry->linked = NULL;
 		entry->scheduled = NULL;
@@ -1135,7 +1142,7 @@ static int __init init_mc(void)
 {
 	int cpu;
 	int i;
-	cpu_entry_t *entry_c, *entry_d;
+	cpu_entry_t *entry;
 
 	bheap_init(&mc_cpu_heap_c);
 	bheap_init(&mc_cpu_heap_d);
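Review note: the last two hunks finish the migration from a single CPU heap to per-criticality C/D heaps: mc_activate_plugin() was still initializing the pre-split `mc_cpu_heap` and `entry->hn` names, and init_mc() declared a pair of entry pointers it no longer needs. After the patch, both init paths agree on the two-heap pattern:

	/* Post-split initialization (as in the fixed hunks; loop body abridged). */
	bheap_init(&mc_cpu_heap_c);
	bheap_init(&mc_cpu_heap_d);
	for_each_online_cpu(cpu) {
		entry = &per_cpu(mc_cpu_entries, cpu);
		bheap_node_init(&entry->hn_c, entry);
		bheap_node_init(&entry->hn_d, entry);
	}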