Diffstat (limited to 'litmus/sched_mc.c')
-rw-r--r--	litmus/sched_mc.c	147
1 file changed, 62 insertions, 85 deletions
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index f24c51653fe2..8c2238c6bf43 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -1,4 +1,4 @@
-/**
+/*
  * litmus/sched_mc.c
  *
  * Implementation of the Mixed Criticality scheduling algorithm.
@@ -270,13 +270,13 @@ static void update_ghost_time(struct task_struct *p)
                 delta = 0;
                 TRACE_MC_TASK(p, "WARNING: negative time delta\n");
         }
-        if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) {
+        if (budget_remaining(p) <= delta) {
                 TRACE_MC_TASK(p, "Ghost job could have ended\n");
-                tsk_mc_data(p)->mc_job.ghost_budget = 0;
+                tsk_rt(p)->job_params.exec_time = get_exec_cost(p);
                 p->se.exec_start = clock;
         } else {
                 TRACE_MC_TASK(p, "Ghost job updated, but didn't finish\n");
-                tsk_mc_data(p)->mc_job.ghost_budget -= delta;
+                tsk_rt(p)->job_params.exec_time += delta;
                 p->se.exec_start = clock;
         }
 }
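This hunk replaces the plugin-private ghost_budget counter with LITMUS^RT's standard per-job budget accounting. For orientation, the stock helpers the new code leans on look roughly like the following (paraphrased from LITMUS^RT's include/litmus/litmus.h; not part of this patch):

    /* Paraphrased stock LITMUS^RT budget helpers (not in this patch). */
    #define get_exec_cost(t)    (tsk_rt(t)->task_params.exec_cost)
    #define get_exec_time(t)    (tsk_rt(t)->job_params.exec_time)
    /* true once the job has consumed its entire budget */
    #define budget_exhausted(t) (get_exec_time(t) >= get_exec_cost(t))

    /* time left before the budget runs out (0 if already exhausted) */
    static inline lt_t budget_remaining(struct task_struct *t)
    {
            if (!budget_exhausted(t))
                    return get_exec_cost(t) - get_exec_time(t);
            return 0;
    }

Under these definitions, setting exec_time to get_exec_cost(p) is the exact analogue of the old ghost_budget = 0: it makes budget_exhausted() true from that point on.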
@@ -302,13 +302,13 @@ static void link_task_to_crit(struct crit_entry *ce,
                 ce->linked->rt_param.linked_on = NO_CPU;
                 if (is_ghost(ce->linked)) {
                         cancel_ghost(ce);
-                        if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) {
+                        if (!budget_exhausted(ce->linked)) {
                                 /* Job isn't finished, so do accounting */
                                 update_ghost_time(ce->linked);
                         }
                 }
-                sched_trace_server_switch_away(sid(ce), 0, ce->linked->pid,
-                                tsk_rt(ce->linked)->job_params.job_no);
+                sched_trace_server_switch_away(sid(ce), 0, -ce->linked->pid,
+                                get_rt_job(ce->linked));
         }
 
         /* Actually link task */
@@ -321,15 +321,11 @@ static void link_task_to_crit(struct crit_entry *ce,
                          * tasks. Otherwise reset the budget timer.
                          */
                         task->se.exec_start = litmus_clock();
-                        when_to_fire = task->se.exec_start +
-                                tsk_mc_data(task)->mc_job.ghost_budget;
+                        when_to_fire = task->se.exec_start + budget_remaining(task);
                         arm_ghost(ce, when_to_fire);
-
-                        sched_trace_server_switch_to(sid(ce), 0, 0, 0);
-                } else {
-                        sched_trace_server_switch_to(sid(ce), 0, task->pid,
-                                        get_rt_job(ce->linked));
                 }
+                sched_trace_server_switch_to(sid(ce), 0, -task->pid,
+                                get_rt_job(ce->linked));
         }
 }
 
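With the budget kept in job_params, the ghost timer can be armed straight from the generic helper: it fires at the instant the remaining budget would be used up if the ghost job ran uninterrupted from now on. The arithmetic, spelled out (names as in the hunk above):

    lt_t now = task->se.exec_start;              /* just set to litmus_clock() */
    lt_t when_to_fire = now + budget_remaining(task);
    arm_ghost(ce, when_to_fire);                 /* wakes mc_ghost_exhausted() */

The tracing is also unified: instead of logging ghost links as a switch to a dummy (0, 0) pair, the crit-entry server sid(ce) now always reports switching to -task->pid, the task's kernel-side server id (see the next hunk for this convention).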
@@ -434,18 +430,19 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 
         if (entry->linked) {
                 sched_trace_server_switch_away(-entry->linked->pid,
-                                get_server_job(entry->linked),
+                                get_rt_job(entry->linked),
                                 entry->linked->pid,
-                                get_rt_job(entry->linked));
+                                get_user_job(entry->linked));
         }
 
-        if (task){
+        if (task) {
                 set_rt_flags(task, RT_F_RUNNING);
                 sched_trace_server_switch_to(-task->pid,
-                                get_server_job(task),
+                                get_rt_job(task),
                                 task->pid,
-                                get_rt_job(task));
+                                get_user_job(task));
         }
+
         entry->linked = task;
 
         /* Higher criticality crit entries are now usable */
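These four-argument trace points follow a (server id, server job, child id, child job) pattern. The diff settles on the convention that -t->pid names the kernel-side server of task t while t->pid names the user-visible task, with get_rt_job() counting kernel job releases and get_user_job() (presumably introduced elsewhere in this series) counting the job number as userspace sees it; the two counters can drift apart once the kernel forces completions. Reading the switch_to call above:

    /* Server -task->pid (the kernel's view of the task), in kernel job
     * get_rt_job(task), begins running the user-visible task task->pid,
     * which is in user job get_user_job(task).
     */
    sched_trace_server_switch_to(-task->pid, get_rt_job(task),
                                 task->pid, get_user_job(task));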
@@ -480,7 +477,11 @@ static void preempt(struct domain *dom, struct crit_entry *ce)
         }
         update_crit_position(ce);
 
-        /* Preempt actual execution if this is a running task */
+        /* Preempt actual execution if this is a running task.
+         * We know that our task is higher priority than what is currently
+         * running on this CPU as otherwise the crit_entry would have
+         * been disabled and a preemption could not have occurred
+         */
         if (!is_ghost(task)) {
                 link_task_to_cpu(entry, task);
                 preempt_if_preemptable(entry->scheduled, entry->cpu);
@@ -564,7 +565,7 @@ static void check_for_preempt(struct domain *dom)
                 if (!can_use(ce))
                         /* CPU disabled while locking! */
                         fix_crit_position(ce);
-                else if (dom->preempt_needed(dom, ce->linked))
+                else if (mc_preempt_needed(dom, ce->linked))
                         /* Success! Check for more preemptions */
                         preempt(dom, ce);
                 else {
@@ -582,7 +583,7 @@ static void check_for_preempt(struct domain *dom)
                 dom->peek_ready(dom);
 
                 raw_spin_lock(&entry->lock);
-                if (can_use(ce) && dom->preempt_needed(dom, ce->linked)) {
+                if (can_use(ce) && mc_preempt_needed(dom, ce->linked)) {
                         preempt(dom, ce);
                         update_crit_levels(entry);
                 } else {
@@ -609,6 +610,7 @@ static void remove_from_all(struct task_struct* task)
 
         /* Remove the task from any CPU state */
         if (task->rt_param.linked_on != NO_CPU) {
+                TRACE_MC_TASK(task, "Linked to something\n");
                 entry = &per_cpu(cpus, task->rt_param.linked_on);
                 raw_spin_lock(&entry->lock);
 
@@ -618,7 +620,7 @@ static void remove_from_all(struct task_struct* task)
                 BUG_ON(ce->linked != task);
                 link_task_to_crit(ce, NULL);
                 update_crit_position(ce);
-                if (!is_ghost(task) && entry->linked == task) {
+                if (entry->linked == task) {
                         update = 1;
                         link_task_to_cpu(entry, NULL);
                 }
@@ -646,41 +648,25 @@ static void remove_from_all(struct task_struct* task)
  */
 static void job_completion(struct task_struct *task, int forced)
 {
-        int behind;
         TRACE_MC_TASK(task, "Completed\n");
 
         /* Logically stop the task execution */
         set_rt_flags(task, RT_F_SLEEP);
         remove_from_all(task);
 
-        /* Level-A tasks cannot ever get behind */
-        behind = tsk_mc_crit(task) != CRIT_LEVEL_A && behind_server(task);
-
-        if (!forced && !is_ghost(task)) {
-                /* Task voluntarily ceased execution. Move on to next period */
-                task_release(task);
-                sched_trace_task_completion(task, forced);
-
-                /* Convert to ghost job */
-                tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task);
-                tsk_mc_data(task)->mc_job.is_ghost = 1;
+        if (!forced) {
+                /* Userspace releases */
+                sched_trace_task_completion(current, 0);
+                setup_user_release(current, get_user_deadline(current));
         }
 
-        /* If the task has no ghost budget, convert back from ghost.
-         * If the task is behind, undo ghost conversion so that it
-         * can catch up.
-         */
-        if (behind || tsk_mc_data(task)->mc_job.ghost_budget == 0) {
-                TRACE_MC_TASK(task, "Not a ghost task\n");
-                tsk_mc_data(task)->mc_job.is_ghost = 0;
-                tsk_mc_data(task)->mc_job.ghost_budget = 0;
-        }
 
-        /* If server has run out of budget, wait until next release */
-        if (budget_exhausted(task)) {
-                sched_trace_server_completion(-task->pid,
-                                get_server_job(task));
-                server_release(task);
+        /* If server has run out of budget, wait until next release
+         * TODO: Level A does this independently and should not.
+         */
+        if (budget_exhausted(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+                sched_trace_server_completion(-task->pid, get_rt_job(task));
+                prepare_for_next_period(task);
         }
 
         /* Requeue non-blocking tasks */
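Two things stand out in the rewritten body. First, the !forced branch is only reachable from the completing task's own completion system call, where current == task, so the switch from task to current is harmless (if a little inconsistent); setup_user_release() and get_user_deadline() are helpers from elsewhere in this series. Second, user-visible and server completions are now decoupled; roughly:

    /* Sketch of the two completion paths after this change:
     *
     *  sys_complete_job()              budget-enforcement timers
     *        |                                  |
     *  job_completion(t, 0)            job_completion(t, 1)
     *        |                                  |
     *  advance user release            advance kernel server
     *  (setup_user_release)            (prepare_for_next_period)
     */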
@@ -706,7 +692,7 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 
         local_irq_save(flags);
         TRACE("Ghost exhausted\n");
-        TRACE_CRIT_ENTRY(ce, "Firing here\n");
+        TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock());
 
         /* Due to race conditions, we cannot just set the linked
          * task's budget to 0 as it may no longer be the task
@@ -716,14 +702,14 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
         raw_spin_lock(&crit_cpu(ce)->lock);
         if (ce->linked && is_ghost(ce->linked)) {
                 update_ghost_time(ce->linked);
-                if (tsk_mc_data(ce->linked)->mc_job.ghost_budget == 0) {
+                if (budget_exhausted(ce->linked)) {
                         tmp = ce->linked;
                 }
         }
         raw_spin_unlock(&crit_cpu(ce)->lock);
 
         if (tmp)
-                job_completion(tmp, 0);
+                job_completion(tmp, 1);
 
         local_irq_restore(flags);
 #ifndef CONFIG_MERGE_TIMERS
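Both budget-enforcement timer paths now call job_completion() with forced = 1, matching the flag's new meaning: the completion is imposed by the kernel, so the userspace release state must not be advanced. The lock/complete split above is the usual timer-context pattern, condensed:

    struct task_struct *tmp = NULL;

    raw_spin_lock(&crit_cpu(ce)->lock);
    if (ce->linked && is_ghost(ce->linked)) {
            update_ghost_time(ce->linked);
            if (budget_exhausted(ce->linked))
                    tmp = ce->linked;   /* still the task the timer was armed for */
    }
    raw_spin_unlock(&crit_cpu(ce)->lock);

    if (tmp)
            job_completion(tmp, 1);     /* forced: budget ran out */

As the comment in the hunk explains, the timer can race with relinking, so ce->linked must be re-validated under the CPU lock before the completion is carried out outside it.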
@@ -748,12 +734,11 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 
         raw_spin_lock(&crit_cpu(ce)->lock);
         if (ce->linked &&
-            ce->linked == ce_data->should_schedule &&
-            is_ghost(ce->linked))
+            ce->linked == ce_data->should_schedule)
         {
                 old_link = ce->linked;
-                tsk_mc_data(ce->linked)->mc_job.ghost_budget = 0;
                 link_task_to_crit(ce, NULL);
+                mc_ce_job_completion(dom, old_link);
         }
         raw_spin_unlock(&crit_cpu(ce)->lock);
 
@@ -764,7 +749,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
         if (NULL != old_link) {
                 STRACE("old_link " TS " so will call job completion\n", TA(old_link));
                 raw_spin_unlock(dom->lock);
-                job_completion(old_link, 0);
+                job_completion(old_link, 1);
         } else {
                 STRACE("old_link was null, so will call check for preempt\n");
                 raw_spin_unlock(dom->lock);
@@ -837,12 +822,13 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
         unsigned long flags;
         struct cpu_entry* entry;
         enum crit_level level = tsk_mc_crit(t);
-        char name[TASK_COMM_LEN];
-        strcpy(name, "rtspin");
 
         local_irq_save(flags);
         TRACE("New mixed criticality task %d\n", t->pid);
 
+        if (level == CRIT_LEVEL_A)
+                get_rt_relative_deadline(t) = get_exec_cost(t);
+
         /* Assign domain */
         if (level < CRIT_LEVEL_C)
                 entry = &per_cpu(cpus, get_partition(t));
@@ -850,19 +836,15 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
                 entry = &per_cpu(cpus, task_cpu(t));
         t->rt_param._domain = entry->crit_entries[level].domain;
 
-        if (budget_enforced(t)) {
-                /* Userspace and kernelspace view of task state may differ.
-                 * Model kernel state as an additional container
-                 */
-                sched_trace_container_param(t->pid, name);
-                sched_trace_server_param(-t->pid, t->pid,
-                                get_exec_cost(t), get_rt_period(t));
-        }
+        /* Userspace and kernelspace view of task state may differ.
+         * Model kernel state as an additional container
+         */
+        sched_trace_container_param(t->pid, t->comm);
+        sched_trace_server_param(-t->pid, t->pid,
+                        get_exec_cost(t), get_rt_period(t));
 
         /* Setup job params */
         release_at(t, litmus_clock());
-        tsk_mc_data(t)->mc_job.ghost_budget = 0;
-        tsk_mc_data(t)->mc_job.is_ghost = 0;
         if (running) {
                 BUG_ON(entry->scheduled);
                 entry->scheduled = t;
@@ -872,7 +854,6 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
         }
         t->rt_param.linked_on = NO_CPU;
 
-
         job_arrival(t);
 
         local_irq_restore(flags);
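mc_task_new() now names the trace container after the task's real comm instead of a hard-coded "rtspin", registers the container/server pair unconditionally (the budget_enforced() guard is gone), and no longer initializes the removed ghost fields. The new Level-A special case ties the relative deadline to the execution budget; assuming get_rt_relative_deadline() expands to a plain task_params field as in stock LITMUS^RT, the assignment reads:

    /* Presumably: a Level-A (table-driven) job must finish within its
     * reserved slot, so its implicit deadline equals its budget.
     * get_rt_relative_deadline(t) is assumed to expand to something like
     * tsk_rt(t)->task_params.relative_deadline, hence assignable.
     */
    if (level == CRIT_LEVEL_A)
            get_rt_relative_deadline(t) = get_exec_cost(t);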
@@ -1057,12 +1038,14 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
         raw_spin_lock(&entry->lock);
 
         if (!entry->linked && !ce->linked && dtask && can_use(ce)) {
+                /* Pop dtask */
                 dom->take_ready(dom);
+
                 link_task_to_crit(ce, dtask);
                 update_crit_position(ce);
-                ready_task = (is_ghost(dtask)) ? NULL : dtask;
 
-                /* Task found! */
+                /* Actual running task found */
+                ready_task = (is_ghost(dtask)) ? NULL : dtask;
                 if (ready_task) {
                         link_task_to_cpu(entry, ready_task);
                         raw_spin_unlock(dom->lock);
@@ -1084,6 +1067,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
         raw_spin_unlock(&entry->lock);
         local_irq_restore(flags);
         if (next) {
+                BUG_ON(!get_rt_job(next));
                 TRACE_MC_TASK(next, "Picked this task\n");
         } else if (exists && !next)
                 TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock());
@@ -1105,11 +1089,7 @@ void mc_finish_switch(struct task_struct *prev)
  */
 void mc_release_at(struct task_struct *ts, lt_t start)
 {
-        /* hack so that we can have CE timers start at the right time */
-        if (CRIT_LEVEL_A == tsk_mc_crit(ts))
-                mc_ce_release_at_common(ts, start);
-        else
-                release_at(ts, start);
+        release_at(ts, start);
 }
 
 long mc_deactivate_plugin(void)
@@ -1191,7 +1171,7 @@ static void mc_release_ts(lt_t time)
         strcpy(name, "LVL-A");
         for_each_online_cpu(cpu) {
                 entry = &per_cpu(cpus, cpu);
-                trace_litmus_container_param(++cont_id, (const char*)&name);
+                sched_trace_container_param(++cont_id, (const char*)&name);
                 ce = &entry->crit_entries[level];
                 sched_trace_server_param(sid(ce), cont_id, 0, 0);
         }
@@ -1200,22 +1180,21 @@ static void mc_release_ts(lt_t time)
         strcpy(name, "LVL-B");
         for_each_online_cpu(cpu) {
                 entry = &per_cpu(cpus, cpu);
-                trace_litmus_container_param(++cont_id, (const char*)&name);
+                sched_trace_container_param(++cont_id, (const char*)&name);
                 ce = &entry->crit_entries[level];
                 sched_trace_server_param(sid(ce), cont_id, 0, 0);
         }
 
         level = CRIT_LEVEL_C;
         strcpy(name, "LVL-C");
-        trace_litmus_container_param(++cont_id, (const char*)&name);
+        sched_trace_container_param(++cont_id, (const char*)&name);
         for_each_online_cpu(cpu) {
                 entry = &per_cpu(cpus, cpu);
                 ce = &entry->crit_entries[level];
                 sched_trace_server_param(sid(ce), cont_id, 0, 0);
         }
 
-
-
+        mc_ce_release_at_common(NULL, time);
 }
 
 static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
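This hunk pairs with the earlier mc_release_at() cleanup: rather than hijacking each Level-A task's release hook to start its cyclic-executive timer, the CE timers are now started once for the whole task system at release time. The NULL argument presumably means "no particular task, start all per-CPU CE timers aligned to time":

    mc_ce_release_at_common(NULL, time);  /* start Level-A CE timers system-wide */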
@@ -1287,8 +1266,7 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
                             enum crit_level prio, int is_partitioned, int cpu)
 {
         pd_domain_init(dom, rt, edf_ready_order, NULL,
-                       mc_release_jobs, mc_preempt_needed,
-                       edf_higher_prio);
+                       mc_release_jobs, edf_higher_prio);
         rt->level = prio;
 #if defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
         /* All timers are on one CPU and release-master is using the event
@@ -1345,8 +1323,7 @@ static int __init init_mc(void)
                 raw_spin_lock_init(a_dom_lock);
                 ce_domain_init(&dom_data->domain,
                                a_dom_lock, ce_requeue, ce_peek_and_take_ready,
-                               ce_peek_and_take_ready, mc_preempt_needed,
-                               ce_higher_prio, ce_data, cpu,
+                               ce_peek_and_take_ready, ce_higher_prio, ce_data, cpu,
                                ce_timer_function);
                 init_local_domain(entry, dom_data, CRIT_LEVEL_A);
                 dom_data->domain.name = "LVL-A";