author		Christopher Kenna <cjk@cs.unc.edu>	2011-09-30 01:23:20 -0400
committer	Christopher Kenna <cjk@cs.unc.edu>	2011-09-30 01:23:20 -0400
commit		cd5685b6483df2f1ba8affc0ff8a0679f4044db8 (patch)
tree		b2c15c6f04fdfd96a738900d8e822057847ea641 /litmus
parent		23a00b911b968c6290251913ecc34171836b4d32 (diff)
Refactor timer merging and add it to CE plugin.
THIS CODE IS UNTESTED.

We now initialize one event group for each CPU at system start, and the
event group for a given CPU can be obtained via a function in
event_group.c.

An event now also stores which group it is in when add_event() is called
on it. This lets us cancel the event without knowing what event group it
belongs to.

This matters because level-C events (like releases) have a NULL event
group. When add_event() is called with a NULL group, it uses the event
group of the current CPU. If the event later needs to be canceled, the
group saved in the event tells us where to remove it from.
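As an illustration only (not part of this patch), here is a minimal sketch
of how a caller is expected to use the reworked interface. The function
signatures are taken from the diff below; example_arm_and_cancel() and the
CPU number are made up:

	/* Hypothetical caller sketching the new event-group API. */
	static void example_arm_and_cancel(struct rt_event *e, lt_t fire)
	{
		/* Pinned event: use the event group of a specific CPU. */
		add_event(get_event_group_for(2), e, fire);

		/* Global (level-C) event: pass a NULL group and add_event()
		 * falls back to the group of the CPU it is running on,
		 * saving that group in the event. */
		add_event(NULL, e, fire);

		/* No group argument needed; the saved group is used. */
		cancel_event(e);
	}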
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/Makefile		8
-rw-r--r--	litmus/ce_domain.c	6
-rw-r--r--	litmus/event_group.c	65
-rw-r--r--	litmus/rt_domain.c	19
-rw-r--r--	litmus/sched_mc.c	135
-rw-r--r--	litmus/sched_mc_ce.c	49
6 files changed, 191 insertions, 91 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index af8bc7618b95..b6ca7ab66b16 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -16,12 +16,12 @@ obj-y = sched_plugin.o litmus.o \
 	srp.o \
 	bheap.o \
 	ctrldev.o \
-	domain.o \
-	event_group.o
+	domain.o

-# obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
-# obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
+obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 obj-$(CONFIG_PLUGIN_MC) += sched_mc.o sched_mc_ce.o ce_domain.o
+obj-$(CONFIG_MERGE_TIMERS) += event_group.o

 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index ac6cc14d44f7..54a4a18e01b7 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -1,6 +1,7 @@
 #include <linux/pid.h>
 #include <linux/sched.h>
 #include <linux/hrtimer.h>
+#include <linux/slab.h>

 #include <litmus/litmus.h>
 #include <litmus/debug_trace.h>
@@ -78,7 +79,12 @@ void ce_domain_init(domain_t *dom,
 		   task_prio);
 	dom->data = dom_data;
 	dom_data->cpu = cpu;
+#ifdef CONFIG_MERGE_TIMERS
+	init_event(&dom_data->event, CRIT_LEVEL_A, ce_timer_callback,
+		   event_list_alloc(GFP_ATOMIC));
+#else
 	hrtimer_start_on_info_init(&dom_data->timer_info);
 	hrtimer_init(&dom_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	dom_data->timer.function = ce_timer_callback;
+#endif
 }
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 276ba5dd242d..db43961258bf 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -1,5 +1,6 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/module.h>

 #include <litmus/litmus.h>
 #include <litmus/trace.h>
@@ -158,6 +159,12 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 	VTRACE("Adding event 0x%p with priority %d for time %llu\n",
 	       e, e->prio, fire);

+	/* A NULL group means use the group of the currently executing CPU */
+	if (NULL == group)
+		group = get_event_group_for(NO_CPU);
+	/* Saving the group is important for cancellations */
+	e->_event_group = group;
+
 	raw_spin_lock(&group->queue_lock);
 	el = get_event_list(group, e, fire, 0);
 	if (!el) {
@@ -192,11 +199,12 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 /**
  * cancel_event() - Remove event from the group.
  */
-void cancel_event(struct event_group *group, struct rt_event *e)
+void cancel_event(struct rt_event *e)
 {
 	struct list_head *swap = NULL;
 	struct rt_event *swappy;
 	struct event_list *tmp;
+	struct event_group *group = e->_event_group;

 	if (e->list.next != &e->list) {
 		raw_spin_lock(&group->queue_lock);
@@ -222,25 +230,12 @@ void cancel_event(struct event_group *group, struct rt_event *e)

 		hrtimer_try_to_cancel(&e->event_list->timer);
 		list_del_init(&e->event_list->list);
+		e->_event_group = NULL;

 		raw_spin_unlock(&group->queue_lock);
 	}
 }

-/**
- * init_event_group() - Prepare group for events.
- */
-void init_event_group(struct event_group *group, lt_t res, int cpu)
-{
-	int i;
-	VTRACE("Creating group with res %llu on CPU %d", res, cpu);
-	group->res = res;
-	group->cpu = cpu;
-	for (i = 0; i < EVENT_QUEUE_SLOTS; i++)
-		INIT_LIST_HEAD(&group->event_queue[i]);
-	raw_spin_lock_init(&group->queue_lock);
-}
-
 struct kmem_cache *event_list_cache, *event_cache;

 struct event_list* event_list_alloc(int gfp_flags)
@@ -264,3 +259,43 @@ void init_event(struct rt_event *e, int prio, fire_event_t function,
 	e->event_list = el;
 	INIT_LIST_HEAD(&e->list);
 }
+
+/**
+ * init_event_group() - Prepare group for events.
+ * @group	Group to prepare
+ * @res		Timer resolution. Two events of @res distance will be merged
+ * @cpu		Cpu on which to fire timers
+ */
+static void init_event_group(struct event_group *group, lt_t res, int cpu)
+{
+	int i;
+	VTRACE("Creating group with resolution %llu on CPU %d", res, cpu);
+	group->res = res;
+	group->cpu = cpu;
+	for (i = 0; i < EVENT_QUEUE_SLOTS; i++)
+		INIT_LIST_HEAD(&group->event_queue[i]);
+	raw_spin_lock_init(&group->queue_lock);
+}
+
+
+DEFINE_PER_CPU(struct event_group, _event_groups);
+
+struct event_group *get_event_group_for(const int cpu)
+{
+	return &per_cpu(_event_groups,
+			(NO_CPU == cpu) ? smp_processor_id() : cpu);
+}
+
+static int __init _init_event_groups(void)
+{
+	int cpu;
+	printk("Initializing LITMUS^RT event groups.\n");
+
+	for_each_online_cpu(cpu) {
+		init_event_group(get_event_group_for(cpu),
+				CONFIG_MERGE_TIMERS_WINDOW, cpu);
+	}
+	return 0;
+}
+
+module_init(_init_event_groups);
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index db92e849f084..fbd91c829619 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -166,7 +166,7 @@ static void reinit_release_heap(rt_domain_t *rt, struct task_struct* t)

 #ifdef CONFIG_MERGE_TIMERS
 	rh->event.prio = rt->prio;
-	cancel_event(rt->event_group, &rh->event);
+	cancel_event(&rh->event);
 #else
 	/* Make sure it is safe to use. The timer callback could still
 	 * be executing on another CPU; hrtimer_cancel() will wait
@@ -188,7 +188,11 @@ static void reinit_release_heap(rt_domain_t *rt, struct task_struct* t)

 }

+#ifdef CONFIG_RELEASE_MASTER
+static void arm_release_timer_on(struct release_heap *rh, int target_cpu)
+#else
 static void arm_release_timer(struct release_heap *rh)
+#endif
 {
 #ifdef CONFIG_MERGE_TIMERS
 	add_event(rh->dom->event_group, &rh->event, rh->release_time);
@@ -200,8 +204,7 @@ static void arm_release_timer(struct release_heap *rh)
 	 */

 #ifdef CONFIG_RELEASE_MASTER
-	if (rt->release_master == NO_CPU &&
-	    target_cpu == NO_CPU)
+	if (rh->dom->release_master == NO_CPU && target_cpu == NO_CPU)
 #endif
 		__hrtimer_start_range_ns(&rh->timer,
 				ns_to_ktime(rh->release_time),
@@ -210,7 +213,7 @@ static void arm_release_timer(struct release_heap *rh)
 	else
 		hrtimer_start_on(/* target_cpu overrides release master */
 				(target_cpu != NO_CPU ?
-				 target_cpu : rt->release_master),
+				 target_cpu : rh->dom->release_master),
 				&rh->info, &rh->timer,
 				ns_to_ktime(rh->release_time),
 				HRTIMER_MODE_ABS_PINNED);
@@ -278,9 +281,13 @@ static void setup_release(rt_domain_t *_rt)
 	 * owner do the arming (which is the "first" task to reference
 	 * this release_heap anyway).
 	 */
-	if (rh == tsk_rt(t)->rel_heap)
+	if (rh == tsk_rt(t)->rel_heap) {
+#ifdef CONFIG_RELEASE_MASTER
+		arm_release_timer_on(rh, target_cpu);
+#else
 		arm_release_timer(rh);
-	else
+#endif
+	} else
 		VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
 	}
 }
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 7b74958d1f4f..17f84c9eba79 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -27,6 +27,8 @@
 #include <litmus/sched_mc.h>
 #include <litmus/ce_domain.h>

+/* XXX TODO Do we ever want to move level-A timers? */
+
 /**
  * struct cpu_entry - State of a CPU for the entire MC system
  * @cpu		CPU id
@@ -34,6 +36,9 @@
  * @linked	Task that should be running / is logically running
  * @lock	For serialization
  * @crit_entries	Array of CPU state per criticality level
+ * @redir	List of redirected work for this CPU.
+ * @redir_lock	Lock for @redir.
+ * @event_group	Event group for timer merging.
  */
 struct cpu_entry {
 	int cpu;
@@ -45,18 +50,12 @@ struct cpu_entry {
 	struct list_head redir;
 	raw_spinlock_t redir_lock;
 #endif
-#ifdef CONFIG_MERGE_TIMERS
-	struct event_group* event_group;
-#endif
 };

 DEFINE_PER_CPU(struct cpu_entry, cpus);
 #ifdef CONFIG_RELEASE_MASTER
 static int interrupt_cpu;
 #endif
-#ifdef CONFIG_MERGE_TIMERS
-static struct event_group* global_group;
-#endif

 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
@@ -67,6 +66,7 @@ static struct event_group* global_group;
 	(((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
 #define crit_cpu(ce) \
 	(container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries))
+#define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level])
 #define TRACE_ENTRY(e, fmt, args...) \
 	STRACE("P%d, linked=" TS " " fmt "\n", e->cpu, TA(e->linked), ##args)
 #define TRACE_CRIT_ENTRY(ce, fmt, args...) \
@@ -129,7 +129,7 @@ static inline struct crit_entry* lowest_prio_cpu(struct domain *dom)
 static inline void cancel_ghost(struct crit_entry *ce)
 {
 #ifdef CONFIG_MERGE_TIMERS
-	cancel_event(crit_cpu(ce)->event_group, &ce->event);
+	cancel_event(&ce->event);
 #else
 	hrtimer_try_to_cancel(&ce->timer);
 #endif
@@ -141,10 +141,10 @@ static inline void cancel_ghost(struct crit_entry *ce)
 static inline void arm_ghost(struct crit_entry *ce, lt_t fire)
 {
 #ifdef CONFIG_MERGE_TIMERS
-	add_event(crit_cpu(ce)->event_group, &ce->event, fire);
+	add_event(get_event_group_for(crit_cpu(ce)->cpu), &ce->event, fire);
 #else
 	__hrtimer_start_range_ns(&ce->timer,
-			ns_to_ktime(when_to_fire),
+			ns_to_ktime(fire),
 			0 /* delta */,
 			HRTIMER_MODE_ABS_PINNED,
 			0 /* no wakeup */);
@@ -270,10 +270,8 @@ static void low_prio_arrival(struct task_struct *task)
 	if (!can_requeue(task)) return;

 #ifdef CONFIG_PLUGIN_MC_REDIRECT
-#ifndef CONFIG_PLUGIN_MC_REDIRECT_ALL
 	if (!is_global_task(task))
 		goto arrive;
-#endif
 	if (smp_processor_id() != interrupt_cpu) {
 		entry = &__get_cpu_var(cpus);
 		raw_spin_lock(&entry->redir_lock);
@@ -284,7 +282,7 @@ static void low_prio_arrival(struct task_struct *task)
 	} else
 #endif
 	{
-	arrive:
+arrive:
 		job_arrival(task);
 	}
 }
@@ -576,19 +574,19 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 #endif
 }

-static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
+/*
+ * The MC-CE common timer callback code for merged and non-merged timers.
+ * Returns the next time the timer should fire.
+ */
+static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 {
-	struct ce_dom_data *ce_data =
-		container_of(timer, struct ce_dom_data, timer);
-	struct crit_entry *ce = &per_cpu(cpus, ce_data->cpu).crit_entries[CRIT_LEVEL_A];
+	struct crit_entry *ce = get_crit_entry_for(ce_data->cpu, CRIT_LEVEL_A);
 	struct domain *dom = ce->domain;
 	struct task_struct *old_link = NULL;
-	unsigned long flags;
+	lt_t next_timer_abs;

 	TRACE("MC level-A timer callback for CPU %d\n", ce_data->cpu);

-	local_irq_save(flags);
-
 	raw_spin_lock(dom->lock);

 	raw_spin_lock(&crit_cpu(ce)->lock);
@@ -602,7 +600,7 @@ static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
 	}
 	raw_spin_unlock(&crit_cpu(ce)->lock);

-	mc_ce_timer_callback_common(dom, timer);
+	next_timer_abs = mc_ce_timer_callback_common(dom);

 	/* job completion will check for preemptions by means of calling job
 	 * arrival if the task is not blocked */
@@ -615,11 +613,38 @@ static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
 		raw_spin_unlock(dom->lock);
 		check_for_preempt(dom);
 	}
+	return next_timer_abs;
+}

+#ifdef CONFIG_MERGE_TIMERS
+static void ce_timer_function(struct rt_event *e)
+{
+	struct ce_dom_data *ce_data =
+		container_of(e, struct ce_dom_data, event);
+	struct event_group *event_group = get_event_group_for(ce_data->cpu);
+	unsigned long flags;
+	lt_t next_timer_abs;
+
+	local_irq_save(flags);
+	next_timer_abs = __ce_timer_function(ce_data);
+	add_event(event_group, e, next_timer_abs);
 	local_irq_restore(flags);
+}
+#else /* else to CONFIG_MERGE_TIMERS */
+static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
+{
+	struct ce_dom_data *ce_data =
+		container_of(timer, struct ce_dom_data, timer);
+	unsigned long flags;
+	lt_t next_timer_abs;

+	local_irq_save(flags);
+	next_timer_abs = __ce_timer_function(ce_data);
+	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
+	local_irq_restore(flags);
 	return HRTIMER_RESTART;
 }
+#endif /* CONFIG_MERGE_TIMERS */


 /**
@@ -915,14 +940,6 @@ static rt_domain_t _mc_crit_c_rt;
 struct bheap _mc_heap_c;
 struct bheap_node _mc_nodes_c[NR_CPUS];

-#ifdef CONFIG_MERGE_TIMERS
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-struct event_group _mc_group;
-#else
-DEFINE_PER_CPU(struct event_group, _mc_groups);
-#endif
-#endif
-
 static long mc_activate_plugin(void)
 {
 	struct domain_data *dom_data;
@@ -933,8 +950,14 @@ static long mc_activate_plugin(void)

 #ifdef CONFIG_RELEASE_MASTER
 	interrupt_cpu = atomic_read(&release_master_cpu);
-	if (interrupt_cpu == NO_CPU)
-		interrupt_cpu = 0;
+#if defined(CONFIG_PLUGIN_MC_REDIRECT) || \
+	(defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS))
+	if (NO_CPU == interrupt_cpu) {
+		printk(KERN_ERR "LITMUS-MC: need a release master\n");
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
 #endif

 	for_each_online_cpu(cpu) {
@@ -1016,20 +1039,30 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
 }

 static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
-				   int timer_cpu, int prio)
+				   int prio, int is_partitioned, int cpu)
 {
 	pd_domain_init(dom, rt, edf_ready_order, NULL,
 		       mc_release_jobs, mc_preempt_needed,
 		       edf_higher_prio);
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-#ifdef CONFIG_MERGE_TIMERS
-	rt->event_group = &_mc_group;
+#if defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
+	/* All timers are on one CPU and release-master is using the event
+	 * merging interface as well. */
+	BUG_ON(NO_CPU == interrupt_cpu);
+	rt->event_group = get_event_group_for(interrupt_cpu);
 	rt->prio = prio;
-#else
+#elif defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && !defined(CONFIG_MERGE_TIMERS)
+	/* Using release master, but not merging timers. */
 	rt->release_master = interrupt_cpu;
-#endif
-#elif CONFIG_MERGE_TIMERS
-	rt->event_group = &_mc_groups[timer_cpu];
+#elif !defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
+	/* Merge the timers, but don't move them to the release master. */
+	if (is_partitioned) {
+		rt->event_group = get_event_group_for(cpu);
+	} else {
+		/* Global timers will be added to the event groups that code is
+		 * executing on when add_event() is called.
+		 */
+		rt->event_group = NULL;
+	}
 	rt->prio = prio;
 #endif
 }
@@ -1058,16 +1091,6 @@ static int __init init_mc(void)
 		INIT_LIST_HEAD(&entry->redir);
 #endif

-#ifdef CONFIG_MERGE_TIMERS
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-		entry->event_group = &_mc_group;
-#else
-		init_event_group(&_mc_groups[cpu],
-				CONFIG_MERGE_TIMERS_WINDOW, cpu);
-		entry->event_group = &_mc_groups[cpu];
-#endif
-#endif
-
 		/* CRIT_LEVEL_A */
 		dom_data = &per_cpu(_mc_crit_a, cpu);
 		ce_data = &per_cpu(_mc_crit_a_ce_data, cpu);
@@ -1085,25 +1108,17 @@ static int __init init_mc(void)
 		dom_data = &per_cpu(_mc_crit_b, cpu);
 		rt = &per_cpu(_mc_crit_b_rt, cpu);
 		init_local_domain(entry, dom_data, CRIT_LEVEL_B);
-		init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_B);
+		init_edf_domain(&dom_data->domain, rt, CRIT_LEVEL_B, 1, cpu);
 		b_dom_lock = dom_data->domain.lock;
 		raw_spin_lock_init(b_dom_lock);
 		dom_data->domain.name = "LVL-B";
 	}

-#ifdef CONFIG_MERGE_TIMERS
-#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
-	init_event_group(&_mc_group, CONFIG_MERGE_TIMERS_WINDOW, interrupt_cpu);
-	global_group = &_mc_group;
-#else
-	global_group = &_mc_groups[0];
-#endif
-#endif
-
 	/* CRIT_LEVEL_C */
 	init_global_domain(&_mc_crit_c, CRIT_LEVEL_C,
 			&_mc_heap_c, _mc_nodes_c);
-	init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, 0, CRIT_LEVEL_C);
+	init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, CRIT_LEVEL_C,
+			0, NO_CPU);
 	c_dom_lock = _mc_crit_c.domain.lock;
 	raw_spin_lock_init(c_dom_lock);
 	_mc_crit_c.domain.name = "LVL-C";
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index 63b0470e1f52..c5066918f282 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -52,6 +52,10 @@ struct ce_pid_entry {
 	unsigned int expected_job;
 };

+/*
+ * Each CPU needs a mapping of level A ID (integer) to struct pid so that we
+ * can get its task struct.
+ */
 struct ce_pid_table {
 	struct ce_pid_entry entries[CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS];
 	int num_pid_entries;
@@ -434,7 +438,10 @@ void mc_ce_task_exit_common(struct task_struct *ts)
  * Timer stuff
  **********************************************************/

-void mc_ce_timer_callback_common(struct domain *dom, struct hrtimer *timer)
+/*
+ * Returns the next absolute time that the timer should fire.
+ */
+lt_t mc_ce_timer_callback_common(struct domain *dom)
 {
 	/* relative and absolute times for cycles */
 	lt_t now, offset_rel, cycle_start_abs, next_timer_abs;
@@ -455,9 +462,7 @@ void mc_ce_timer_callback_common(struct domain *dom, struct hrtimer *timer)
 	cycle_start_abs = now - offset_rel;
 	idx = mc_ce_schedule_at(dom, offset_rel);
 	pid_entry = get_pid_entry(ce_data->cpu, idx);
-	/* set the timer to fire at the next cycle start */
 	next_timer_abs = cycle_start_abs + pid_entry->acc_time;
-	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));

 	STRACE("timer: now: %llu offset_rel: %llu cycle_start_abs: %llu "
 	       "next_timer_abs: %llu\n", now, offset_rel,
@@ -495,32 +500,52 @@ void mc_ce_timer_callback_common(struct domain *dom, struct hrtimer *timer)
 		sched_trace_task_release(should_schedule);
 		set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
 	}
+	return next_timer_abs;
 }

 /*
  * What to do when a timer fires. The timer should only be armed if the number
  * of PID entries is positive.
  */
+#ifdef CONFIG_MERGE_TIMERS
+static void mc_ce_timer_callback(struct rt_event *e)
+#else
 static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
+#endif
 {
 	struct ce_dom_data *ce_data;
 	unsigned long flags;
 	struct domain *dom;
-
+	lt_t next_timer_abs;
+#ifdef CONFIG_MERGE_TIMERS
+	struct event_group *event_group;
+	ce_data = container_of(e, struct ce_dom_data, event);
+	event_group = get_event_group_for(ce_data->cpu);
+#else
 	ce_data = container_of(timer, struct ce_dom_data, timer);
+#endif
 	dom = get_domain_for(ce_data->cpu);

 	TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);

 	raw_spin_lock_irqsave(dom->lock, flags);
-	mc_ce_timer_callback_common(dom, timer);
+	next_timer_abs = mc_ce_timer_callback_common(dom);
+
+	/* setup an event or timer for the next release in the CE schedule */
+#ifdef CONFIG_MERGE_TIMERS
+	add_event(event_group, e, next_timer_abs);
+#else
+	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
+#endif

 	if (ce_data->scheduled != ce_data->should_schedule)
 		preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);

 	raw_spin_unlock_irqrestore(dom->lock, flags);

+#ifndef CONFIG_MERGE_TIMERS
 	return HRTIMER_RESTART;
+#endif
 }

 /*
@@ -530,7 +555,10 @@ static int cancel_all_timers(void)
 {
 	struct ce_dom_data *ce_data;
 	struct domain *dom;
-	int cpu, cancel_res, ret = 0;
+	int cpu, ret = 0;
+#ifndef CONFIG_MERGE_TIMERS
+	int cancel_res;
+#endif

 	TRACE("cancel all timers\n");

@@ -538,10 +566,14 @@ static int cancel_all_timers(void)
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->should_schedule = NULL;
+#ifdef CONFIG_MERGE_TIMERS
+		cancel_event(&ce_data->event);
+#else
 		cancel_res = hrtimer_cancel(&ce_data->timer);
 		atomic_set(&ce_data->timer_info.state,
 				HRTIMER_START_ON_INACTIVE);
 		ret = ret || cancel_res;
+#endif
 	}
 	return ret;
 }
@@ -570,9 +602,14 @@ static void arm_all_timers(void)
 		for (idx = 0; idx < pid_table->num_pid_entries; idx++) {
 			pid_table->entries[idx].expected_job = 0;
 		}
+#ifdef CONFIG_MERGE_TIMERS
+		TRACE("adding event for CPU %d\n", cpu);
+		add_event(get_event_group_for(cpu), &ce_data->event, start);
+#else
 		TRACE("arming timer for CPU %d\n", cpu);
 		hrtimer_start_on(cpu, &ce_data->timer_info, &ce_data->timer,
 				ns_to_ktime(start), HRTIMER_MODE_ABS_PINNED);
+#endif
 	}
 }
