author     Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-27 20:15:32 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-27 20:36:04 -0400
commit     23a00b911b968c6290251913ecc34171836b4d32 (patch)
tree       f6c8289054d2961902931e89bdc11ccc01bc3a73 /litmus/sched_mc.c
parent     f21e1d0ef90c2e88ae6a563afc31ea601ed968c7 (diff)
parent     609c45f71b7a2405230fd2f8436837d6389ec599 (diff)
Merged with ce domains
Diffstat (limited to 'litmus/sched_mc.c')
 -rw-r--r--  litmus/sched_mc.c  224
 1 file changed, 153 insertions(+), 71 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 30898246ea38..7b74958d1f4f 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -25,28 +25,7 @@
 #include <litmus/event_group.h>
 
 #include <litmus/sched_mc.h>
-
-/**
- * crit_entry_t - State of a CPU within each criticality level system.
- * @level   Criticality level of this entry
- * @linked  Logically running task, ghost or regular
- * @domain  Domain from which to draw tasks
- * @usable  False if a higher criticality task is running
- * @timer   For ghost task budget enforcement
- * @node    Used to sort crit_entries by preemptability in global domains
- */
-struct crit_entry {
-        enum crit_level level;
-        struct task_struct* linked;
-        struct domain* domain;
-        int usable;
-#ifdef CONFIG_MERGE_TIMERS
-        struct rt_event event;
-#else
-        struct hrtimer timer;
-#endif
-        struct bheap_node* node;
-};
+#include <litmus/ce_domain.h>
 
 /**
  * struct cpu_entry - State of a CPU for the entire MC system
@@ -71,18 +50,6 @@ struct cpu_entry {
 #endif
 };
 
-/**
- * struct domain_data - Wrap domains with related CPU state
- * @domain      A domain for a criticality level
- * @heap        The preemptable heap of crit entries (for global domains)
- * @crit_entry  The crit entry for this domain (for partitioned domains)
- */
-struct domain_data {
-        struct domain domain;
-        struct bheap* heap;
-        struct crit_entry* crit_entry;
-};
-
 DEFINE_PER_CPU(struct cpu_entry, cpus);
 #ifdef CONFIG_RELEASE_MASTER
 static int interrupt_cpu;
@@ -101,9 +68,9 @@ static struct event_group* global_group;
 #define crit_cpu(ce) \
         (container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries))
 #define TRACE_ENTRY(e, fmt, args...) \
-        TRACE("P%d, linked=" TS " " fmt "\n", e->cpu, TA(e->linked), ##args)
+        STRACE("P%d, linked=" TS " " fmt "\n", e->cpu, TA(e->linked), ##args)
 #define TRACE_CRIT_ENTRY(ce, fmt, args...) \
-        TRACE("%s P%d, linked=" TS " " fmt "\n", \
+        STRACE("%s P%d, linked=" TS " " fmt "\n", \
               (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args)
 
 /*
@@ -162,7 +129,7 @@ static inline struct crit_entry* lowest_prio_cpu(struct domain *dom)
 static inline void cancel_ghost(struct crit_entry *ce)
 {
 #ifdef CONFIG_MERGE_TIMERS
-        cancel_event(&ce->event);
+        cancel_event(crit_cpu(ce)->event_group, &ce->event);
 #else
         hrtimer_try_to_cancel(&ce->timer);
 #endif
@@ -174,9 +141,7 @@ static inline void cancel_ghost(struct crit_entry *ce)
 static inline void arm_ghost(struct crit_entry *ce, lt_t fire)
 {
 #ifdef CONFIG_MERGE_TIMERS
-        struct event_group* group = (is_global(ce->domain)) ?
-                global_group : crit_cpu(ce)->event_group;
-        add_event(group, &ce->event, fire);
+        add_event(crit_cpu(ce)->event_group, &ce->event, fire);
 #else
         __hrtimer_start_range_ns(&ce->timer,
                         ns_to_ktime(when_to_fire),
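
The two timer hunks above make the event-group choice unconditional: a ghost budget event is now armed on, and cancelled from, the event group of the CPU that owns the crit_entry, rather than picking between global_group and the per-CPU group at arm time. A minimal compilable sketch of why arm and cancel must name the same queue; the event/event_queue types and evq_* helpers are invented stand-ins, not the litmus event_group API:

struct event { struct event *next; unsigned long long when; };
struct event_queue { struct event *head; };   /* one queue per CPU */

/* arm: push onto the owning CPU's queue */
static void evq_add(struct event_queue *q, struct event *e,
                    unsigned long long when)
{
        e->when = when;
        e->next = q->head;
        q->head = e;
}

/* cancel: only finds the event if it searches the SAME queue it was
 * armed on */
static void evq_cancel(struct event_queue *q, struct event *e)
{
        struct event **p;

        for (p = &q->head; *p; p = &(*p)->next) {
                if (*p == e) {
                        *p = e->next;   /* unlink: found it here */
                        return;
                }
        }
        /* armed on a different queue: silently lost, which is the bug
         * the unconditional per-CPU group choice rules out */
}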
@@ -243,10 +208,14 @@ static void link_task_to_crit(struct crit_entry *ce,
         ce->linked = task;
         if (task) {
                 task->rt_param.linked_on = crit_cpu(ce)->cpu;
-                if (is_ghost(task)) {
+                if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+                        /* There is a level-A timer that will force a
+                         * preemption, so we don't set this for level-A
+                         * tasks.
+                         */
                         /* Reset budget timer */
                         task->se.exec_start = litmus_clock();
-                        when_to_fire = litmus_clock() +
+                        when_to_fire = task->se.exec_start +
                                 tsk_mc_data(task)->mc_job.ghost_budget;
                         arm_ghost(ce, when_to_fire);
                 }
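
The second change in this hunk is easy to miss: the old code read litmus_clock() twice, once for exec_start and once for when_to_fire, so the timer could fire slightly after exec_start + ghost_budget and over-account the ghost job. Reusing exec_start makes the two timestamps equal by construction. A compilable miniature of the difference; now_ns() and struct job are made-up stand-ins for litmus_clock() and the real task state:

static unsigned long long fake_time;

/* stand-in clock: every read returns a later time, as a real clock would */
static unsigned long long now_ns(void) { return fake_time++; }

struct job { unsigned long long exec_start, budget; };

static unsigned long long arm_drifty(struct job *j)
{
        j->exec_start = now_ns();
        return now_ns() + j->budget;        /* second read: fires late */
}

static unsigned long long arm_exact(struct job *j)
{
        j->exec_start = now_ns();
        return j->exec_start + j->budget;   /* exactly budget after start */
}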
@@ -261,6 +230,7 @@ static void link_task_to_crit(struct crit_entry *ce,
 }
 
 static void check_for_preempt(struct domain*);
+
 /**
  * job_arrival() - Called when a task re-enters the system.
  * Caller must hold no locks.
@@ -330,7 +300,7 @@ static void fix_global_levels(void)
         struct list_head *pos, *safe;
         struct task_struct *t;
 
-        TRACE("Fixing global levels\n");
+        STRACE("Fixing global levels");
         for_each_online_cpu(c) {
                 e = &per_cpu(cpus, c);
                 raw_spin_lock(&e->redir_lock);
@@ -527,6 +497,7 @@ static void remove_from_all(struct task_struct* task)
  */
 static void job_completion(struct task_struct *task, int forced)
 {
+        lt_t now;
         TRACE_MC_TASK(task, "Completed");
         sched_trace_task_completion(task, forced);
         BUG_ON(!task);
@@ -535,8 +506,11 @@ static void job_completion(struct task_struct *task, int forced)
         set_rt_flags(task, RT_F_SLEEP);
         remove_from_all(task);
 
+        now = litmus_clock();
+
         /* If it's not a ghost job, do ghost job conversion */
         if (!is_ghost(task)) {
+                TRACE_MC_TASK(task, "is not a ghost task");
                 tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task);
                 tsk_mc_data(task)->mc_job.is_ghost = 1;
         }
@@ -546,6 +520,7 @@ static void job_completion(struct task_struct *task, int forced)
          * conversion. Revert back to a normal task and complete the period.
          */
         if (tsk_mc_data(task)->mc_job.ghost_budget == 0) {
+                TRACE_MC_TASK(task, "has zero ghost budget");
                 tsk_mc_data(task)->mc_job.is_ghost = 0;
                 prepare_for_next_period(task);
                 if (is_released(task, litmus_clock()))
@@ -573,9 +548,9 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
         unsigned long flags;
         struct task_struct *tmp = NULL;
 
-
         local_irq_save(flags);
-        TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing");
+        TRACE("Ghost exhausted\n");
+        TRACE_CRIT_ENTRY(ce, "Firing here");
 
         /* Due to race conditions, we cannot just set the linked
          * task's budget to 0 as it may no longer be the task
@@ -601,6 +576,52 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 #endif
 }
 
+static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
+{
+        struct ce_dom_data *ce_data =
+                container_of(timer, struct ce_dom_data, timer);
+        struct crit_entry *ce = &per_cpu(cpus, ce_data->cpu).crit_entries[CRIT_LEVEL_A];
+        struct domain *dom = ce->domain;
+        struct task_struct *old_link = NULL;
+        unsigned long flags;
+
+        TRACE("MC level-A timer callback for CPU %d\n", ce_data->cpu);
+
+        local_irq_save(flags);
+
+        raw_spin_lock(dom->lock);
+
+        raw_spin_lock(&crit_cpu(ce)->lock);
+        if (ce->linked &&
+            ce->linked == ce_data->should_schedule &&
+            is_ghost(ce->linked))
+        {
+                old_link = ce->linked;
+                tsk_mc_data(ce->linked)->mc_job.ghost_budget = 0;
+                link_task_to_crit(ce, NULL);
+        }
+        raw_spin_unlock(&crit_cpu(ce)->lock);
+
+        mc_ce_timer_callback_common(dom, timer);
+
+        /* job completion will check for preemptions by means of calling job
+         * arrival if the task is not blocked */
+        if (NULL != old_link) {
+                STRACE("old_link " TS " so will call job completion\n", TA(old_link));
+                raw_spin_unlock(dom->lock);
+                job_completion(old_link, 0);
+        } else {
+                STRACE("old_link was null, so will call check for preempt\n");
+                raw_spin_unlock(dom->lock);
+                check_for_preempt(dom);
+        }
+
+        local_irq_restore(flags);
+
+        return HRTIMER_RESTART;
+}
+
+
 /**
  * mc_release_jobs() - Add heap of tasks to the system, check for preemptions.
  */
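
ce_timer_function above is deliberate about locking: the CPU's crit_entry lock nests inside the domain lock, the ghost task is unlinked while both are held, and the domain lock is dropped on both paths before calling job_completion() or check_for_preempt(), which acquire locks of their own. A runnable userspace sketch of that shape, with pthread mutexes standing in for the raw spinlocks and all names invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dom_lock = PTHREAD_MUTEX_INITIALIZER;  /* outer */
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;  /* inner */
static int linked = 1;                    /* "a ghost task is linked" */

static void complete_job(void)  { printf("job completion\n"); }
static void check_preempt(void) { printf("check for preempt\n"); }

static void timer_cb(void)
{
        int old_link = 0;

        pthread_mutex_lock(&dom_lock);
        pthread_mutex_lock(&cpu_lock);    /* inner nests inside outer */
        if (linked) {
                old_link = 1;
                linked = 0;               /* unlink under both locks */
        }
        pthread_mutex_unlock(&cpu_lock);

        /* drop the outer lock on BOTH paths before calling helpers that
         * take locks of their own, as ce_timer_function does */
        pthread_mutex_unlock(&dom_lock);
        if (old_link)
                complete_job();
        else
                check_preempt();
}

int main(void) { timer_cb(); return 0; }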
@@ -611,7 +632,7 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
         struct domain *dom = get_task_domain(first);
 
         raw_spin_lock_irqsave(dom->lock, flags);
-        TRACE_MC_TASK(first, "Jobs released");
+        TRACE(TS "Jobs released\n", TA(first));
         __merge_ready(rt, tasks);
         check_for_preempt(dom);
         raw_spin_unlock_irqrestore(dom->lock, flags);
@@ -664,7 +685,7 @@ static void mc_task_wake_up(struct task_struct *task)
         lt_t now = litmus_clock();
         local_irq_save(flags);
 
-        TRACE_MC_TASK(task, "Wakes up");
+        TRACE(TS " wakes up\n", TA(task));
         if (is_tardy(task, now)) {
                 /* Task missed its last release */
                 release_at(task, now);
@@ -683,7 +704,7 @@ static void mc_task_block(struct task_struct *task)
 {
         unsigned long flags;
         local_irq_save(flags);
-        TRACE_MC_TASK(task, "Block at %llu", litmus_clock());
+        TRACE(TS " blocks\n", TA(task));
         remove_from_all(task);
         local_irq_restore(flags);
 }
@@ -696,7 +717,7 @@ static void mc_task_exit(struct task_struct *task)
         unsigned long flags;
         local_irq_save(flags);
         BUG_ON(!is_realtime(task));
-        TRACE_MC_TASK(task, "RIP");
+        TRACE(TS " RIP\n", TA(task));
 
         remove_from_all(task);
         if (tsk_rt(task)->scheduled_on != NO_CPU) {
@@ -704,6 +725,9 @@ static void mc_task_exit(struct task_struct *task)
                 tsk_rt(task)->scheduled_on = NO_CPU;
         }
 
+        if (CRIT_LEVEL_A == tsk_mc_crit(task))
+                mc_ce_task_exit_common(task);
+
         local_irq_restore(flags);
 }
 
@@ -713,19 +737,30 @@ static void mc_task_exit(struct task_struct *task)
  */
 static long mc_admit_task(struct task_struct* task)
 {
+        const enum crit_level crit = tsk_mc_crit(task);
+        long ret;
         if (!tsk_mc_data(task)) {
                 printk(KERN_WARNING "Tried to admit task with no criticality "
                        "level\n");
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out;
         }
-        if (tsk_mc_crit(task) < CRIT_LEVEL_C && get_partition(task) == NO_CPU) {
+        if (crit < CRIT_LEVEL_C && get_partition(task) == NO_CPU) {
                 printk(KERN_WARNING "Tried to admit partitioned task with no "
                        "partition\n");
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out;
+        }
+        if (crit == CRIT_LEVEL_A) {
+                ret = mc_ce_admit_task_common(task);
+                if (ret)
+                        goto out;
         }
         printk(KERN_INFO "Admitted task with criticality level %d\n",
                tsk_mc_crit(task));
-        return 0;
+        ret = 0;
+out:
+        return ret;
 }
 
 /**
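
mc_admit_task is reshaped into the kernel's single-exit goto-out idiom so the new level-A check (mc_ce_admit_task_common) can share one error path with the existing validations. A compilable miniature of the control flow, with plain flags standing in for the real task checks:

#include <errno.h>

static int admit(int has_mc_data, int has_partition, int is_level_a)
{
        int ret;

        if (!has_mc_data) {
                ret = -EINVAL;          /* no criticality level set */
                goto out;
        }
        if (!has_partition) {
                ret = -EINVAL;          /* partitioned task, no partition */
                goto out;
        }
        if (is_level_a) {
                ret = 0;                /* imagine a CE admission check here */
                if (ret)
                        goto out;       /* propagate its error unchanged */
        }
        ret = 0;                        /* single success assignment */
out:
        return ret;                     /* single exit point */
}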
@@ -761,11 +796,11 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
         if (exists) {
                 entry->scheduled->rt_param.scheduled_on = NO_CPU;
-                TRACE_MC_TASK(prev,
-                        "blocks:%d out_of_time:%d sleep:%d preempt:%d "
-                        "state:%d sig:%d global:%d",
+                TRACE(TS
+                        " blocks:%d out_of_time:%d sleep:%d preempt:%d "
+                        "state:%d sig:%d global:%d\n", TA(prev),
                         blocks, out_of_time, sleep, preempt,
                         prev->state, signal_pending(prev), global);
         }
         raw_spin_unlock(&entry->lock);
 
@@ -781,7 +816,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         /* Any task which exhausts its budget or sleeps waiting for its next
          * period completes unless its execution has been forcibly stopped.
          */
-        if ((out_of_time || sleep) && !blocks && !preempt)
+        if ((out_of_time || sleep) && !blocks)/* && !preempt)*/
                 job_completion(entry->scheduled, !sleep);
         /* Global scheduled tasks must wait for a deschedule before they
          * can rejoin the global state. Rejoin them here.
@@ -836,10 +871,29 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         if (next)
                 TRACE_MC_TASK(next, "Scheduled at %llu", litmus_clock());
         else if (exists && !next)
-                TRACE("Becomes idle at %llu\n", litmus_clock());
+                TRACE_ENTRY(entry, "Becomes idle at %llu", litmus_clock());
         return next;
 }
 
+/*
+ * This is the plugin's release_at function, called by the release task-set
+ * system call. Other places in the file use the generic LITMUS release_at(),
+ * which is not this.
+ */
+void mc_release_at(struct task_struct *ts, lt_t start)
+{
+        /* hack so that we can have CE timers start at the right time */
+        if (CRIT_LEVEL_A == tsk_mc_crit(ts))
+                mc_ce_release_at_common(ts, start);
+        else
+                release_at(ts, start);
+}
+
+long mc_deactivate_plugin(void)
+{
+        return mc_ce_deactivate_plugin_common();
+}
+
 /* **************************************************************************
  * Initialization
  * ************************************************************************** */
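
The new mc_release_at hook dispatches on criticality: level-A tasks go through the cyclic-executive (CE) common code so the per-CPU CE timers start at the task-set release time, while all other tasks take the stock release_at() path. A compilable sketch of that dispatch; the mini task type and function names are invented:

enum crit_level { CRIT_A, CRIT_B, CRIT_C };

struct mini_task {
        enum crit_level level;
        unsigned long long release;     /* absolute release time, ns */
        int ce_timer_armed;
};

static void generic_release_at(struct mini_task *t, unsigned long long start)
{
        t->release = start;
}

static void ce_release_at(struct mini_task *t, unsigned long long start)
{
        t->release = start;
        t->ce_timer_armed = 1;          /* CE table timer starts with the set */
}

static void plugin_release_at(struct mini_task *t, unsigned long long start)
{
        if (t->level == CRIT_A)
                ce_release_at(t, start);
        else
                generic_release_at(t, start);
}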
@@ -850,7 +904,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
 /* LVL-A */
 DEFINE_PER_CPU(struct domain_data, _mc_crit_a);
-DEFINE_PER_CPU(rt_domain_t, _mc_crit_a_rt);
+DEFINE_PER_CPU(raw_spinlock_t, _mc_crit_a_lock);
+DEFINE_PER_CPU(struct ce_dom_data, _mc_crit_a_ce_data);
 /* LVL-B */
 DEFINE_PER_CPU(struct domain_data, _mc_crit_b);
 DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt);
@@ -870,12 +925,31 @@ DEFINE_PER_CPU(struct event_group, _mc_groups);
 
 static long mc_activate_plugin(void)
 {
+        struct domain_data *dom_data;
+        struct domain *dom;
+        struct domain_data *our_domains[NR_CPUS];
+        int cpu, n = 0;
+        long ret;
+
 #ifdef CONFIG_RELEASE_MASTER
         interrupt_cpu = atomic_read(&release_master_cpu);
         if (interrupt_cpu == NO_CPU)
                 interrupt_cpu = 0;
 #endif
-        return 0;
+
+        for_each_online_cpu(cpu) {
+                BUG_ON(NR_CPUS <= n);
+                dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain;
+                dom_data = domain_data(dom);
+                our_domains[cpu] = dom_data;
+                n++;
+        }
+        ret = mc_ce_set_domains(n, our_domains);
+        if (ret)
+                goto out;
+        ret = mc_ce_activate_plugin_common();
+out:
+        return ret;
 }
 
 static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
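
Plugin activation now gathers each online CPU's level-A domain into an array and hands the set to the CE layer in one call (mc_ce_set_domains) before running the common CE activation. A compilable sketch of the gather step; NCPU, the mini_domain type, and gather_domains are invented, and a fixed loop stands in for for_each_online_cpu:

#define NCPU 4                          /* stand-in for NR_CPUS */

struct mini_domain { int cpu; };

static struct mini_domain level_a[NCPU];        /* per-CPU level-A domains */
static struct mini_domain *gathered[NCPU];

static int gather_domains(void)
{
        int cpu, n = 0;

        for (cpu = 0; cpu < NCPU; cpu++) {      /* "for each online cpu" */
                if (n >= NCPU)
                        return -1;              /* mirrors BUG_ON(NR_CPUS <= n) */
                gathered[cpu] = &level_a[cpu];  /* collect this CPU's domain */
                n++;
        }
        return n;       /* count handed to the CE setup in a single call */
}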
@@ -888,6 +962,8 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
         .task_block             = mc_task_block,
         .admit_task             = mc_admit_task,
         .activate_plugin        = mc_activate_plugin,
+        .release_at             = mc_release_at,
+        .deactivate_plugin      = mc_deactivate_plugin,
 };
 
 static void init_crit_entry(struct crit_entry *ce, enum crit_level level,
@@ -958,13 +1034,15 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
 #endif
 }
 
+struct domain_data *ce_domain_for(int);
 static int __init init_mc(void)
 {
         int cpu;
         struct cpu_entry *entry;
         struct domain_data *dom_data;
         rt_domain_t *rt;
-        raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */
+        raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
+        struct ce_dom_data *ce_data;
 
         for_each_online_cpu(cpu) {
                 entry = &per_cpu(cpus, cpu);
@@ -992,11 +1070,15 @@
 
                 /* CRIT_LEVEL_A */
                 dom_data = &per_cpu(_mc_crit_a, cpu);
-                rt = &per_cpu(_mc_crit_a_rt, cpu);
+                ce_data = &per_cpu(_mc_crit_a_ce_data, cpu);
+                a_dom_lock = &per_cpu(_mc_crit_a_lock, cpu);
+                raw_spin_lock_init(a_dom_lock);
+                ce_domain_init(&dom_data->domain,
+                               a_dom_lock, ce_requeue, ce_peek_and_take_ready,
+                               ce_peek_and_take_ready, mc_preempt_needed,
+                               ce_higher_prio, ce_data, cpu,
+                               ce_timer_function);
                 init_local_domain(entry, dom_data, CRIT_LEVEL_A);
-                init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_A);
-                a_dom = dom_data->domain.lock;
-                raw_spin_lock_init(a_dom);
                 dom_data->domain.name = "LVL-A";
 
                 /* CRIT_LEVEL_B */
@@ -1004,8 +1086,8 @@
                 rt = &per_cpu(_mc_crit_b_rt, cpu);
                 init_local_domain(entry, dom_data, CRIT_LEVEL_B);
                 init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_B);
-                b_dom = dom_data->domain.lock;
-                raw_spin_lock_init(b_dom);
+                b_dom_lock = dom_data->domain.lock;
+                raw_spin_lock_init(b_dom_lock);
                 dom_data->domain.name = "LVL-B";
         }
 
1011 | 1093 | ||
@@ -1022,8 +1104,8 @@ static int __init init_mc(void) | |||
1022 | init_global_domain(&_mc_crit_c, CRIT_LEVEL_C, | 1104 | init_global_domain(&_mc_crit_c, CRIT_LEVEL_C, |
1023 | &_mc_heap_c, _mc_nodes_c); | 1105 | &_mc_heap_c, _mc_nodes_c); |
1024 | init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, 0, CRIT_LEVEL_C); | 1106 | init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, 0, CRIT_LEVEL_C); |
1025 | c_dom = _mc_crit_c.domain.lock; | 1107 | c_dom_lock = _mc_crit_c.domain.lock; |
1026 | raw_spin_lock_init(c_dom); | 1108 | raw_spin_lock_init(c_dom_lock); |
1027 | _mc_crit_c.domain.name = "LVL-C"; | 1109 | _mc_crit_c.domain.name = "LVL-C"; |
1028 | 1110 | ||
1029 | return register_sched_plugin(&mc_plugin); | 1111 | return register_sched_plugin(&mc_plugin); |
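
Taken together, the init_mc hunks split initialization by level: level A now gets a cyclic-executive domain whose lock is a separate per-CPU object, initialized before ce_domain_init runs, while levels B and C keep EDF domains and only rename the pointers to their embedded locks (a_dom to a_dom_lock and so on) for the lock debugger. A compilable sketch of the external-versus-embedded lock split, with invented mini types:

struct mini_dom {
        const char *name;
        int *lock;                      /* stand-in for raw_spinlock_t* */
};

static void lock_init(int *l) { *l = 0; }

static int a_lock;                      /* separate per-CPU lock object */

/* level A: lock allocated outside the domain and initialized first,
 * then handed to the domain, as ce_domain_init does */
static void setup_level_a(struct mini_dom *d)
{
        lock_init(&a_lock);
        d->lock = &a_lock;
        d->name = "LVL-A";
}

static int b_embedded_lock;             /* stand-in for domain.lock target */

/* levels B/C: the EDF domain supplies its own embedded lock, which is
 * initialized only after the domain is set up */
static void setup_level_b(struct mini_dom *d)
{
        d->lock = &b_embedded_lock;
        lock_init(d->lock);
        d->name = "LVL-B";
}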