author		Jeremy Erickson <jerickso@cs.unc.edu>	2011-01-20 15:50:28 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-08-25 14:41:20 -0400
commit		6c739c4b5c75568753ec6491ff0962d60dce1c5b
tree		33de178422f4f0d4482c4582f6bfe0d98644d17c
parent		f8f74f2fc4f5bc7f61a768eb6ba5f64039c3da48

Code to actually schedule ghost jobs
-rw-r--r--	litmus/sched_mc.c	| 141
1 file changed, 109 insertions(+), 32 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index f4b2a46fd077..92b847495673 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -115,7 +115,7 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 
 
 /* the cpus queue themselves according to priority in here */
-static struct bheap_node mc_heap_node[NR_CPUS];
+static struct bheap_node mc_heap_node_c[NR_CPUS], mc_heap_node_d[NR_CPUS];
 static struct bheap mc_cpu_heap_c, mc_cpu_heap_d;
 
 /* Create per-CPU domains for criticality A */
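
The cpu_entry_t structure itself is not part of this diff. A hypothetical reconstruction, inferred only from the fields the patch reads and writes (hn_c/hn_d here, linked and the per-level ghost slots in the hunks below), might look like the following sketch; everything beyond those names is an assumption, and note the patch refers to the per-level slots as both ghost_tasks and ghost_jobs:

	/* Sketch only: cpu_entry_t as implied by this patch's usage. */
	typedef struct {
		int			cpu;
		atomic_t		will_schedule;
		struct task_struct*	linked;	/* job this CPU should run */
		/* one ghost slot per criticality level A..D; the diff
		 * names this array both ghost_tasks and ghost_jobs */
		struct task_struct*	ghost_tasks[CRIT_LEVEL_D + 1];
		struct bheap_node*	hn_c;	/* node in mc_cpu_heap_c */
		struct bheap_node*	hn_d;	/* node in mc_cpu_heap_d */
	} cpu_entry_t;
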
@@ -437,8 +437,8 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 			     i < CRIT_LEVEL_D + 1; i++){
 				if (entry->ghost_tasks[i]){
 					unlink(entry->ghost_tasks[i]);
-					/*TODO: make sure booted tasks
-					 * get rescheduled if needed
+					/* WARNING: it is up to the
+					 * caller to requeue ghost jobs
 					 */
 				}
 			}
@@ -546,30 +546,65 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
-static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 	struct task_struct* task;
+	int i;
 	task = __take_ready(dom);
 	TRACE("prepare_preemption: attempting to link task %d to %d\n",
 	      task->pid, cpu->cpu);
-	if (cpu->linked)
-		requeue(cpu->linked);
+	if (is_ghost(task)){
+		/* Changing ghost task only affects linked task at our level */
+		if (cpu->linked && cpu->linked->rt_param.task_params.crit ==
+				crit)
+			requeue(cpu->linked);
+		/* Can change ghost task at our level as well. */
+		if (cpu->ghost_jobs[crit])
+			requeue(cpu->ghost_jobs[crit]);
+	}
+	else{
+		/* Changing linked tasks could affect both real and ghost * tasks at multiple levels
+		 */
+		if (cpu->linked)
+			requeue(cpu->linked);
+		for (i = crit; i <= CRIT_LEVEL_D; i++) {
+			if (cpu->ghost_jobs[i])
+				requeue(cpu->ghost_jobs[i]);
+		}
+	}
 	link_task_to_cpu(task, cpu);
 	preempt(cpu);
 }
 
-/* Callers always have global lock */
-static void check_for_gedf_preemptions(rt_domain_t *dom){
+/* Callers always have global lock for functions in this section*/
+static void check_for_c_preemptions(rt_domain_t *dom){
+	cpu_entry_t* last;
+	for (last = lowest_prio_cpu_c();
+	     mc_edf_preemption_needed(dom, CRIT_LEVEL_C,
+				      last->linked);
+	     last = lowest_prio_cpu_c()) {
+		prepare_preemption(dom, last, CRIT_LEVEL_C);
+	}
+}
+
+static void check_for_d_preemptions(rt_domain_t *dom){
 	cpu_entry_t* last;
-	for (last = lowest_prio_cpu();
-	     mc_edf_preemption_needed(dom, last->linked);
-	     last = lowest_prio_cpu()) {
-		prepare_preemption(dom, last);
+	for (last = lowest_prio_cpu_d();
+	     mc_edf_preemption_needed(dom, CRIT_LEVEL_D,
+				      last->linked);
+	     last = lowest_prio_cpu_d()) {
+		prepare_preemption(dom, last, CRIT_LEVEL_D);
 	}
 }
 
-static void check_for_pedf_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
-	if (mc_edf_preemption_needed(dom, cpu->linked)) {
-		prepare_preemption(dom, cpu);
+static void check_for_a_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_A, cpu->linked)) {
+		prepare_preemption(dom, cpu, CRIT_LEVEL_A);
+	}
+}
+
+static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_B, cpu->linked)) {
+		prepare_preemption(dom, cpu, CRIT_LEVEL_B);
 	}
 }
 
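
prepare_preemption() now branches on is_ghost(), and the level-C/D checkers iterate with lowest_prio_cpu_c()/lowest_prio_cpu_d(); none of these helpers appear in the diff. A minimal sketch of plausible definitions, assuming is_ghost() tests the remaining ghost budget (consistent with job_completion() below, which zeroes ghost_budget) and that the CPU heaps follow the bheap_peek() pattern of the GSN-EDF plugin this code resembles:

	/* Assumption: a job is a ghost while ghost budget remains. */
	static inline int is_ghost(struct task_struct* t)
	{
		return t->rt_param.job_params.ghost_budget > 0;
	}

	/* Assumed analogue of GSN-EDF's lowest_prio_cpu(), one per heap;
	 * cpu_lower_prio_c is a comparator sketched at the end of this
	 * page.  The D variant would be identical modulo names. */
	static cpu_entry_t* lowest_prio_cpu_c(void)
	{
		struct bheap_node* hn = bheap_peek(cpu_lower_prio_c,
						   &mc_cpu_heap_c);
		return hn->value;
	}
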
@@ -585,18 +620,18 @@ static noinline void mc_job_arrival(struct task_struct* task)
 	TRACE("mc_job_arrival triggered\n");
 	requeue(task);
 	if (task->rt_param.task_params.crit == CRIT_LEVEL_A){
-		check_for_pedf_preemption(remote_a_queue(get_partition(task)),
+		check_for_a_preemption(remote_a_queue(get_partition(task)),
 				remote_cpu_entry(get_partition(task)));
 	}
 	else if (task->rt_param.task_params.crit == CRIT_LEVEL_B){
-		check_for_pedf_preemption(remote_b_queue(get_partition(task)),
+		check_for_a_preemption(remote_b_queue(get_partition(task)),
 				remote_cpu_entry(get_partition(task)));
 	}
 	else if (task->rt_param.task_params.crit == CRIT_LEVEL_C){
-		check_for_gedf_preemptions(&crit_c);
+		check_for_c_preemptions(&crit_c);
 	}
 	else if (task->rt_param.task_params.crit == CRIT_LEVEL_D){
-		check_for_gedf_preemptions(&crit_d);
+		check_for_d_preemptions(&crit_d);
 	}
 }
 
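
Every arrival funnels into a per-level checker, and all of those call the new three-argument mc_edf_preemption_needed(). That function is outside this diff; a rough sketch of its likely shape, modeled on LITMUS-RT's standard edf_preemption_needed() and hedged accordingly (the actual cross-level priority rules may well differ, and only dom, crit, and linked come from the diff):

	/* Sketch under assumptions: preempt when the domain has a pending
	 * job and the currently linked job does not outrank it. */
	static int mc_edf_preemption_needed(rt_domain_t* dom, int crit,
					    struct task_struct* linked)
	{
		if (!__jobs_pending(dom))
			return 0;	/* nothing waiting in this domain */
		if (!linked)
			return 1;	/* CPU has nothing linked */
		if (linked->rt_param.task_params.crit > crit)
			return 1;	/* linked job is at a lower-priority level */
		if (linked->rt_param.task_params.crit < crit)
			return 0;	/* a higher-priority level owns the CPU */
		return edf_higher_prio(__next_ready(dom), linked);
	}
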
@@ -616,14 +651,17 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 
 	for (i = 0; i < NR_CPUS; i++){
 		if (rt == remote_b_queue(i)){
-			check_for_pedf_preemption(rt, remote_cpu_entry(i));
+			check_for_b_preemption(rt, remote_cpu_entry(i));
 		}
 		else if (rt == remote_a_queue(i)){
-			check_for_pedf_preemption(rt, remote_cpu_entry(i));
+			check_for_a_preemption(rt, remote_cpu_entry(i));
 		}
 	}
-	if (rt == &crit_c || rt == &crit_d){
-		check_for_gedf_preemptions(rt);
+	if (rt == &crit_c){
+		check_for_c_preemptions(rt);
+	}
+	if (rt == &crit_d){
+		check_for_d_preemptions(rt);
 	}
 
 	raw_spin_unlock_irqrestore(&global_lock, flags);
@@ -665,6 +703,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
 		}
 	}
 	/* prepare for next period */
+	/* TODO: check what happens if we actually finish with exactly our
+	 * budget and never convert to ghost
+	 */
 	if (is_ghost(t)) {
 		t->rt_param.job_params.ghost_budget = 0;
 		prepare_for_next_period(t);
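
The new TODO concerns an exact-budget completion: if a job finishes having consumed precisely its level budget, the ghost budget it would carry forward is zero, so under the is_ghost() sketch above it never enters the ghost phase at all. A hypothetical illustration of the completion-time hand-off; budget_remaining() and the assignment are assumptions, only ghost_budget appears in the diff:

	/* Hypothetical conversion of a completing job into a ghost. */
	lt_t remaining = budget_remaining(t);	/* 0 when budget exactly used */
	t->rt_param.job_params.ghost_budget = remaining;
	/* remaining == 0 leaves is_ghost(t) false, so the job skips the
	 * ghost phase entirely, which is exactly the case the TODO flags. */
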
@@ -751,6 +792,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 	struct task_struct* ready_task = NULL;
+	int ready_crit, i;
 
 #ifdef CONFIG_RELEASE_MASTER
 	/* Bail out early if we are the release master.
@@ -819,16 +861,48 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	/* Link pending task if we became unlinked.
 	 */
 	if (!entry->linked){
-		ready_task = __take_ready(local_a_queue);
-		if (!ready_task)
-			ready_task = __take_ready(local_b_queue);
-		if (!ready_task)
-			ready_task = __take_ready(&crit_c);
-		if (!ready_task)
-			ready_task = __take_ready(&crit_d);
+		if (!entry->ghost_tasks[CRIT_LEVEL_A]) {
+			ready_task = __take_ready(local_a_queue);
+			ready_crit = CRIT_LEVEL_A;
+			if (ready_task && is_ghost(ready_task)) {
+				link_task_to_cpu(ready_task, entry);
+				ready_task = NULL;
+			}
+		}
+		if (!ready_task && !entry->ghost_tasks[CRIT_LEVEL_B]) {
+			ready_task = __take_ready(local_b_queue);
+			ready_crit = CRIT_LEVEL_C;
+			if (ready_task && is_ghost(ready_task)) {
+				link_task_to_cpu(ready_task, entry);
+				ready_task = NULL;
+			}
+		}
+		if (!ready_task && !entry->ghost_tasks[CRIT_LEVEL_C]) {
+			ready_task = __take_ready(&crit_c);
+			ready_crit = CRIT_LEVEL_C;
+			if (ready_task && is_ghost(ready_task)) {
+				link_task_to_cpu(ready_task, entry);
+				ready_task = NULL;
+			}
+		}
+		if (!ready_task && !entry->ghost_tasks[CRIT_LEVEL_D]) {
+			ready_task = __take_ready(&crit_d);
+			ready_crit = CRIT_LEVEL_D;
+			if (ready_task && is_ghost(ready_task)) {
+				link_task_to_cpu(ready_task, entry);
+				ready_task = NULL;
+			}
+		}
+		if (!ready_task) {
+			ready_crit = CRIT_LEVEL_D + 1;
+		}
+		for (i = ready_crit; i <= CRIT_LEVEL_D; i++) {
+			requeue(entry->ghost_tasks[i]);
+		}
 		link_task_to_cpu(ready_task, entry);
 		if (ready_task)
-			TRACE_TASK(ready_task, "Linked task inside scheduler\n");
+			TRACE_TASK(ready_task,
+				   "Linked task inside scheduler\n");
 	}
 
 	/* The final scheduling decision. Do we need to switch for some reason?
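
The four blocks in this hunk differ only in the source queue and the level they record (note they set ready_crit = CRIT_LEVEL_C for the level-B queue). A hypothetical table-driven equivalent, not part of the commit, makes the cascade's shape clearer; it assumes CRIT_LEVEL_A..D are 0..3 and that requeue() tolerates empty ghost slots, as the diff's own closing loop over ghost_tasks does:

	/* Sketch: the same cascade as a loop over criticality levels. */
	rt_domain_t* queues[] = { local_a_queue, local_b_queue,
				  &crit_c, &crit_d };
	int lvl;

	ready_crit = CRIT_LEVEL_D + 1;	/* default: nothing linkable found */
	for (lvl = CRIT_LEVEL_A; lvl <= CRIT_LEVEL_D && !ready_task; lvl++) {
		if (entry->ghost_tasks[lvl])
			continue;	/* a ghost already occupies this level */
		ready_task = __take_ready(queues[lvl]);
		if (ready_task && is_ghost(ready_task)) {
			/* ghosts are linked at once and do not end the scan */
			link_task_to_cpu(ready_task, entry);
			ready_task = NULL;
		}
		if (ready_task)
			ready_crit = lvl;
	}
	for (i = ready_crit; i <= CRIT_LEVEL_D; i++)
		requeue(entry->ghost_tasks[i]);	/* boot lower-level ghosts */
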
@@ -1061,17 +1135,20 @@ static int __init init_mc(void)
 {
 	int cpu;
 	int i;
-	cpu_entry_t *entry;
+	cpu_entry_t *entry_c, *entry_d;
 
-	bheap_init(&mc_cpu_heap);
+	bheap_init(&mc_cpu_heap_c);
+	bheap_init(&mc_cpu_heap_d);
 	/* initialize CPU state */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(mc_cpu_entries, cpu);
 		mc_cpus[cpu] = entry;
 		atomic_set(&entry->will_schedule, 0);
 		entry->cpu = cpu;
-		entry->hn = &mc_heap_node[cpu];
-		bheap_node_init(&entry->hn, entry);
+		entry->hn_c = &mc_heap_node_c[cpu];
+		entry->hn_d = &mc_heap_node_d[cpu];
+		bheap_node_init(&entry->hn_c, entry);
+		bheap_node_init(&entry->hn_d, entry);
 	}
 	mc_edf_domain_init(&crit_c, NULL, mc_release_jobs);
 	mc_edf_domain_init(&crit_d, NULL, mc_release_jobs);
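
init_mc() now builds two heaps and two heap nodes per CPU, but the comparator each heap is ordered by lives outside this diff. Assuming it mirrors GSN-EDF's cpu_lower_prio(), a per-level comparator would look roughly like this (C variant shown; the D variant is analogous):

	/* Assumed heap comparator, modeled on GSN-EDF's cpu_lower_prio();
	 * not shown in this diff.  The order is inverted on purpose: the
	 * heap's top element is the CPU whose linked job is easiest to
	 * preempt at this level. */
	static int cpu_lower_prio_c(struct bheap_node *_a, struct bheap_node *_b)
	{
		cpu_entry_t *a = _a->value;
		cpu_entry_t *b = _b->value;
		return edf_higher_prio(b->linked, a->linked);
	}

Note also that the loop body still assigns through entry while the declaration was renamed to entry_c/entry_d; presumably context outside this hunk reconciles the two.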