path: root/litmus/sched_mc.c
Diffstat (limited to 'litmus/sched_mc.c')
-rw-r--r--	litmus/sched_mc.c	135
1 file changed, 75 insertions(+), 60 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 7b74958d1f4f..17f84c9eba79 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -27,6 +27,8 @@
 #include <litmus/sched_mc.h>
 #include <litmus/ce_domain.h>
 
+/* XXX TODO Do we ever want to move level-A timers? */
+
 /**
  * struct cpu_entry - State of a CPU for the entire MC system
  * @cpu CPU id
@@ -34,6 +36,9 @@
  * @linked Task that should be running / is logically running
  * @lock For serialization
  * @crit_entries Array of CPU state per criticality level
+ * @redir List of redirected work for this CPU.
+ * @redir_lock Lock for @redir.
+ * @event_group Event group for timer merging.
  */
 struct cpu_entry {
 	int cpu;
@@ -45,18 +50,12 @@ struct cpu_entry {
 	struct list_head redir;
 	raw_spinlock_t redir_lock;
 #endif
-#ifdef CONFIG_MERGE_TIMERS
-	struct event_group* event_group;
-#endif
 };
 
 DEFINE_PER_CPU(struct cpu_entry, cpus);
 #ifdef CONFIG_RELEASE_MASTER
 static int interrupt_cpu;
 #endif
-#ifdef CONFIG_MERGE_TIMERS
-static struct event_group* global_group;
-#endif
 
 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
@@ -67,6 +66,7 @@ static struct event_group* global_group;
 	(((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
 #define crit_cpu(ce) \
 	(container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries))
+#define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level])
 #define TRACE_ENTRY(e, fmt, args...) \
 	STRACE("P%d, linked=" TS " " fmt "\n", e->cpu, TA(e->linked), ##args)
 #define TRACE_CRIT_ENTRY(ce, fmt, args...) \
@@ -129,7 +129,7 @@ static inline struct crit_entry* lowest_prio_cpu(struct domain *dom)
 static inline void cancel_ghost(struct crit_entry *ce)
 {
 #ifdef CONFIG_MERGE_TIMERS
-	cancel_event(crit_cpu(ce)->event_group, &ce->event);
+	cancel_event(&ce->event);
 #else
 	hrtimer_try_to_cancel(&ce->timer);
 #endif
@@ -141,10 +141,10 @@ static inline void cancel_ghost(struct crit_entry *ce)
 static inline void arm_ghost(struct crit_entry *ce, lt_t fire)
 {
 #ifdef CONFIG_MERGE_TIMERS
-	add_event(crit_cpu(ce)->event_group, &ce->event, fire);
+	add_event(get_event_group_for(crit_cpu(ce)->cpu), &ce->event, fire);
 #else
 	__hrtimer_start_range_ns(&ce->timer,
-			ns_to_ktime(when_to_fire),
+			ns_to_ktime(fire),
 			0 /* delta */,
 			HRTIMER_MODE_ABS_PINNED,
 			0 /* no wakeup */);
@@ -270,10 +270,8 @@ static void low_prio_arrival(struct task_struct *task)
 	if (!can_requeue(task)) return;
 
 #ifdef CONFIG_PLUGIN_MC_REDIRECT
-#ifndef CONFIG_PLUGIN_MC_REDIRECT_ALL
 	if (!is_global_task(task))
 		goto arrive;
-#endif
 	if (smp_processor_id() != interrupt_cpu) {
 		entry = &__get_cpu_var(cpus);
 		raw_spin_lock(&entry->redir_lock);
@@ -284,7 +282,7 @@ static void low_prio_arrival(struct task_struct *task)
 	} else
 #endif
 	{
-	arrive:
+arrive:
 		job_arrival(task);
 	}
 }
@@ -576,19 +574,19 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 #endif
 }
 
-static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
+/*
+ * The MC-CE common timer callback code for merged and non-merged timers.
+ * Returns the next time the timer should fire.
+ */
+static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 {
-	struct ce_dom_data *ce_data =
-		container_of(timer, struct ce_dom_data, timer);
-	struct crit_entry *ce = &per_cpu(cpus, ce_data->cpu).crit_entries[CRIT_LEVEL_A];
+	struct crit_entry *ce = get_crit_entry_for(ce_data->cpu, CRIT_LEVEL_A);
 	struct domain *dom = ce->domain;
 	struct task_struct *old_link = NULL;
-	unsigned long flags;
+	lt_t next_timer_abs;
 
 	TRACE("MC level-A timer callback for CPU %d\n", ce_data->cpu);
 
-	local_irq_save(flags);
-
 	raw_spin_lock(dom->lock);
 
 	raw_spin_lock(&crit_cpu(ce)->lock);
@@ -602,7 +600,7 @@ static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
 	}
 	raw_spin_unlock(&crit_cpu(ce)->lock);
 
-	mc_ce_timer_callback_common(dom, timer);
+	next_timer_abs = mc_ce_timer_callback_common(dom);
 
 	/* job completion will check for preemptions by means of calling job
 	 * arrival if the task is not blocked */
@@ -615,11 +613,38 @@ static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
 		raw_spin_unlock(dom->lock);
 		check_for_preempt(dom);
 	}
+	return next_timer_abs;
+}
 
+#ifdef CONFIG_MERGE_TIMERS
+static void ce_timer_function(struct rt_event *e)
+{
+	struct ce_dom_data *ce_data =
+		container_of(e, struct ce_dom_data, event);
+	struct event_group *event_group = get_event_group_for(ce_data->cpu);
+	unsigned long flags;
+	lt_t next_timer_abs;
+
+	local_irq_save(flags);
+	next_timer_abs = __ce_timer_function(ce_data);
+	add_event(event_group, e, next_timer_abs);
 	local_irq_restore(flags);
+}
+#else /* else to CONFIG_MERGE_TIMERS */
+static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
+{
+	struct ce_dom_data *ce_data =
+		container_of(timer, struct ce_dom_data, timer);
+	unsigned long flags;
+	lt_t next_timer_abs;
 
+	local_irq_save(flags);
+	next_timer_abs = __ce_timer_function(ce_data);
+	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
+	local_irq_restore(flags);
 	return HRTIMER_RESTART;
 }
+#endif /* CONFIG_MERGE_TIMERS */
 
 
 /**
@@ -915,14 +940,6 @@ static rt_domain_t _mc_crit_c_rt;
 struct bheap _mc_heap_c;
 struct bheap_node _mc_nodes_c[NR_CPUS];
 
-#ifdef CONFIG_MERGE_TIMERS
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-struct event_group _mc_group;
-#else
-DEFINE_PER_CPU(struct event_group, _mc_groups);
-#endif
-#endif
-
 static long mc_activate_plugin(void)
 {
 	struct domain_data *dom_data;
@@ -933,8 +950,14 @@ static long mc_activate_plugin(void)
 
 #ifdef CONFIG_RELEASE_MASTER
 	interrupt_cpu = atomic_read(&release_master_cpu);
-	if (interrupt_cpu == NO_CPU)
-		interrupt_cpu = 0;
+#if defined(CONFIG_PLUGIN_MC_REDIRECT) || \
+	(defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS))
+	if (NO_CPU == interrupt_cpu) {
+		printk(KERN_ERR "LITMUS-MC: need a release master\n");
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
 #endif
 
 	for_each_online_cpu(cpu) {
@@ -1016,20 +1039,30 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
 }
 
 static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
-		int timer_cpu, int prio)
+		int prio, int is_partitioned, int cpu)
 {
 	pd_domain_init(dom, rt, edf_ready_order, NULL,
 			mc_release_jobs, mc_preempt_needed,
 			edf_higher_prio);
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-#ifdef CONFIG_MERGE_TIMERS
-	rt->event_group = &_mc_group;
+#if defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
+	/* All timers are on one CPU and release-master is using the event
+	 * merging interface as well. */
+	BUG_ON(NO_CPU == interrupt_cpu);
+	rt->event_group = get_event_group_for(interrupt_cpu);
 	rt->prio = prio;
-#else
+#elif defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && !defined(CONFIG_MERGE_TIMERS)
+	/* Using release master, but not merging timers. */
 	rt->release_master = interrupt_cpu;
-#endif
-#elif CONFIG_MERGE_TIMERS
-	rt->event_group = &_mc_groups[timer_cpu];
+#elif !defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
+	/* Merge the timers, but don't move them to the release master. */
+	if (is_partitioned) {
+		rt->event_group = get_event_group_for(cpu);
+	} else {
+		/* Global timers will be added to the event groups that code is
+		 * executing on when add_event() is called.
+		 */
+		rt->event_group = NULL;
+	}
 	rt->prio = prio;
 #endif
 }
@@ -1058,16 +1091,6 @@ static int __init init_mc(void)
 		INIT_LIST_HEAD(&entry->redir);
 #endif
 
-#ifdef CONFIG_MERGE_TIMERS
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-		entry->event_group = &_mc_group;
-#else
-		init_event_group(&_mc_groups[cpu],
-				CONFIG_MERGE_TIMERS_WINDOW, cpu);
-		entry->event_group = &_mc_groups[cpu];
-#endif
-#endif
-
 		/* CRIT_LEVEL_A */
 		dom_data = &per_cpu(_mc_crit_a, cpu);
 		ce_data = &per_cpu(_mc_crit_a_ce_data, cpu);
@@ -1085,25 +1108,17 @@ static int __init init_mc(void)
 		dom_data = &per_cpu(_mc_crit_b, cpu);
 		rt = &per_cpu(_mc_crit_b_rt, cpu);
 		init_local_domain(entry, dom_data, CRIT_LEVEL_B);
-		init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_B);
+		init_edf_domain(&dom_data->domain, rt, CRIT_LEVEL_B, 1, cpu);
 		b_dom_lock = dom_data->domain.lock;
 		raw_spin_lock_init(b_dom_lock);
 		dom_data->domain.name = "LVL-B";
 	}
 
-#ifdef CONFIG_MERGE_TIMERS
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-	init_event_group(&_mc_group, CONFIG_MERGE_TIMERS_WINDOW, interrupt_cpu);
-	global_group = &_mc_group;
-#else
-	global_group = &_mc_groups[0];
-#endif
-#endif
-
 	/* CRIT_LEVEL_C */
 	init_global_domain(&_mc_crit_c, CRIT_LEVEL_C,
 			&_mc_heap_c, _mc_nodes_c);
-	init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, 0, CRIT_LEVEL_C);
+	init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, CRIT_LEVEL_C,
+			0, NO_CPU);
 	c_dom_lock = _mc_crit_c.domain.lock;
 	raw_spin_lock_init(c_dom_lock);
 	_mc_crit_c.domain.name = "LVL-C";
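
The new call sites above (arm_ghost(), ce_timer_function(), init_edf_domain()) rely on get_event_group_for(), which is not defined in this file; the patch drops the event_group pointer cached in struct cpu_entry and the _mc_group/_mc_groups/global_group storage in favor of that lookup. Below is a minimal sketch of what such a helper could look like, assuming the event-group code keeps one event group per CPU; the table name _event_groups and the NO_CPU fallback are illustrative assumptions, not taken from this patch.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <litmus/event_group.h>

/* Hypothetical per-CPU table, assumed to be initialized elsewhere (e.g. with
 * init_event_group(group, CONFIG_MERGE_TIMERS_WINDOW, cpu) at boot).
 * Not part of this patch. */
static DEFINE_PER_CPU(struct event_group, _event_groups);

struct event_group *get_event_group_for(int cpu)
{
	/* Fall back to the caller's own CPU. This mirrors the comment in
	 * init_edf_domain(): global domains leave rt->event_group NULL, so
	 * their timers land in whatever group the CPU calling add_event()
	 * owns. */
	if (cpu == NO_CPU)
		cpu = smp_processor_id();
	return &per_cpu(_event_groups, cpu);
}

With a helper of this shape, the scheduler only needs a CPU id to locate the right timer-merging group, which is why the per-CPU event-group bookkeeping removed by this patch is no longer needed in sched_mc.c.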