author		Christopher Kenna <cjk@cs.unc.edu>	2011-09-25 16:05:50 -0400
committer	Christopher Kenna <cjk@cs.unc.edu>	2011-09-25 16:08:53 -0400
commit		d990b018b1eb12eb52713d553e5d61a3cdd1249b (patch)
tree		6db29b03b94c122af06367acc45bbd8b3b33cbbe
parent		2fe725ef2142dd6c1bbf72e8d1b0a6f7e885d7ed (diff)
Untested merge of CE and MC plugins
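
The CE (cyclic executive) plugin's per-CPU state is now reached through
the domain_data_t wrapper it shares with the MC plugin, and the MC plugin
drives its level-A criticality domains through the CE code. A minimal
sketch of how the new wrapper nests (every expression below appears in
the diff; this block itself is illustrative, not part of the commit):

	domain_data_t *dom_data = ce_domain_for(cpu);	/* per-CPU wrapper */
	domain_t *dom = &dom_data->domain;		/* embedded domain */
	struct ce_dom_data *ce_data = dom->data;	/* CE state, aka get_ce_data(dom_data) */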
-rw-r--r--	include/litmus/sched_mc.h	31
-rw-r--r--	litmus/ce_domain.c	31
-rw-r--r--	litmus/litmus.c	2
-rw-r--r--	litmus/sched_mc.c	79
-rw-r--r--	litmus/sched_mc_ce.c	95
5 files changed, 135 insertions(+), 103 deletions(-)
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 95cd22cd7202..ad5d097b3d61 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -59,6 +59,37 @@ struct ce_dom_data {
 	struct hrtimer timer;
 };
 
+/**
+ * crit_entry_t - State of a CPU within each criticality level system.
+ * @level	Criticality level of this entry
+ * @linked	Logically running task, ghost or regular
+ * @domain	Domain from which to draw tasks
+ * @usable	False if a higher criticality task is running
+ * @timer	For ghost task budget enforcement
+ * @node	Used to sort crit_entries by preemptability in global domains
+ */
+typedef struct {
+	enum crit_level		level;
+	struct task_struct*	linked;
+	domain_t*		domain;
+	int			usable;
+	struct hrtimer		timer;
+	struct bheap_node*	node;
+	atomic_t		dirty;
+} crit_entry_t;
+
+/**
+ * domain_data_t - Wrap domains with related CPU state
+ * @domain	A domain for a criticality level
+ * @heap	The preemptable heap of crit entries (for global domains)
+ * @crit_entry	The crit entry for this domain (for partitioned domains)
+ */
+typedef struct {
+	domain_t	domain;
+	struct bheap*	heap;
+	crit_entry_t*	crit_entry;
+} domain_data_t;
+
 #endif /* __KERNEL__ */
 
 #endif
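
The kernel-doc above says a domain_data_t serves two configurations:
global domains use @heap to order the crit_entries of all CPUs by
preemptability, while partitioned domains point @crit_entry at their
single entry. An illustrative helper under that assumption (this
function is hypothetical, not part of the commit):

	/* Assumption: heap is only non-NULL for global domains. */
	static inline int domain_is_global(const domain_data_t *dd)
	{
		return dd->heap != NULL;
	}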
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index 5b4fd1cb438f..d6f713fbf789 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -7,6 +7,7 @@
 #include <litmus/rt_param.h>
 #include <litmus/domain.h>
 #include <litmus/sched_mc.h>
+#include <litmus/ce_domain.h>
 
 /*
  * Called for:
@@ -41,24 +42,13 @@ void ce_requeue(domain_t *dom, struct task_struct *ts)
 	}
 }
 
+void mc_ce_task_exit(struct task_struct*);
 /*
  * Called when a task exits the system.
  */
-void ce_exit(domain_t *dom, struct task_struct *ts)
+void ce_task_exit(domain_t *dom, struct task_struct *ts)
 {
-	struct ce_dom_data *ce_data = dom->data;
-	const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;
-	struct pid *pid;
-
-	BUG_ON(task_cpu(ts) != get_partition(ts));
-	BUG_ON(CRIT_LEVEL_A != tsk_mc_crit(ts));
-	BUG_ON(lvl_a_id >= ce_data->num_pid_entries);
-	pid = ce_data->pid_entries[lvl_a_id].pid;
-	BUG_ON(!pid);
-	put_pid(pid);
-	ce_data->pid_entries[lvl_a_id].pid = NULL;
-	if (ce_data->should_schedule == ts)
-		ce_data->should_schedule = NULL;
+	mc_ce_task_exit(ts);
 }
 
 /*
@@ -80,11 +70,12 @@ struct task_struct* ce_peek_and_take_ready(domain_t *dom)
 	return ret;
 }
 
-int ce_higher_prio(domain_t *dom, struct task_struct *_a,
+int ce_higher_prio(struct task_struct *_a,
 		struct task_struct *_b)
 {
 	const struct task_struct *a = _a;
-	struct ce_dom_data *ce_data = dom->data;
+	const domain_t *dom = get_task_domain(a);
+	const struct ce_dom_data *ce_data = dom->data;
 	return (a == ce_data->should_schedule);
 }
 
@@ -102,18 +93,18 @@ void ce_start(struct task_struct *ts, lt_t start)
 	mc_ce_release_at(ts, start);
 }
 
+domain_data_t *ce_domain_for(int);
 long mc_ce_activate_plugin(void);
-domain_t *ce_domain_for(int);
 long ce_activate_plugin(void)
 {
-	domain_t *dom;
+	domain_data_t *dom_data;
 	struct ce_dom_data *ce_data;
 	int cpu;
 
 	/* first change the timer callback function */
 	for_each_online_cpu(cpu) {
-		dom = ce_domain_for(cpu);
-		ce_data = dom->data;
+		dom_data = ce_domain_for(cpu);
+		ce_data = dom_data->domain.data;
 		ce_data->timer.function = ce_timer_function;
 	}
 	/* then run the regular CE activate plugin */
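
Note the signature change: ce_higher_prio() no longer takes a domain
argument but recovers it from the first task via get_task_domain(), so
it now fits the two-task comparator slot domain_t->higher_prio that
litmus/sched_mc.c fills in below. Hypothetical call site for
illustration (t1 and t2 are placeholders):

	/* Both tasks are assumed to belong to the same CE domain. */
	if (ce_higher_prio(t1, t2))
		/* t1 is the task the CE table says should run */;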
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3b502d62cb0e..56a47f9834eb 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -641,7 +641,9 @@ static void _exit_litmus(void)
 	exit_litmus_proc();
 	kmem_cache_destroy(bheap_node_cache);
 	kmem_cache_destroy(release_heap_cache);
+#ifdef CONFIG_PLUGIN_MC
 	kmem_cache_destroy(event_list_cache);
+#endif
 }
 
 module_init(_init_litmus);
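
event_list_cache only exists when the mixed-criticality plugin is
configured, so tearing it down must be guarded the same way. Presumably
the allocation in _init_litmus() sits under a matching guard (sketch;
the exact allocation line is an assumption, not shown in this diff):

	#ifdef CONFIG_PLUGIN_MC
		event_list_cache = KMEM_CACHE(event_list, SLAB_PANIC);
	#endif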
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 577e7d36faf5..bce25bc8822e 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -24,25 +24,7 @@
 #include <litmus/bheap.h>
 
 #include <litmus/sched_mc.h>
-
-/**
- * crit_entry_t - State of a CPU within each criticality level system.
- * @level	Criticality level of this entry
- * @linked	Logically running task, ghost or regular
- * @domain	Domain from which to draw tasks
- * @usable	False if a higher criticality task is running
- * @timer	For ghost task budget enforcement
- * @node	Used to sort crit_entries by preemptability in global domains
- */
-typedef struct {
-	enum crit_level		level;
-	struct task_struct*	linked;
-	domain_t*		domain;
-	int			usable;
-	struct hrtimer		timer;
-	struct bheap_node*	node;
-	atomic_t		dirty;
-} crit_entry_t;
+#include <litmus/ce_domain.h>
 
 /**
  * cpu_entry_t - State of a CPU for the entire MC system
@@ -64,18 +46,6 @@ typedef struct {
 #endif
 } cpu_entry_t;
 
-/**
- * domain_data_t - Wrap domains with related CPU state
- * @domain	A domain for a criticality level
- * @heap	The preemptable heap of crit entries (for global domains)
- * @crit_entry	The crit entry for this domain (for partitioned domains)
- */
-typedef struct {
-	domain_t	domain;
-	struct bheap*	heap;
-	crit_entry_t*	crit_entry;
-} domain_data_t;
-
 static cpu_entry_t* cpus[NR_CPUS];
 #ifdef CONFIG_RELEASE_MASTER
 static int interrupt_cpu;
@@ -213,10 +183,14 @@ static void link_task_to_crit(crit_entry_t *ce,
 	ce->linked = task;
 	if (task) {
 		task->rt_param.linked_on = crit_cpu(ce)->cpu;
-		if (is_ghost(task)) {
+		if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+			/* There is a level-A timer that will force a
+			 * preemption, so we don't set this for level-A
+			 * tasks.
+			 */
 			/* Reset budget timer */
 			task->se.exec_start = litmus_clock();
-			when_to_fire = litmus_clock() +
+			when_to_fire = task->se.exec_start +
 				tsk_mc_data(task)->mc_job.ghost_budget;
 			__hrtimer_start_range_ns(&ce->timer,
 				ns_to_ktime(when_to_fire),
@@ -659,6 +633,9 @@ static void mc_task_exit(struct task_struct *task)
 		tsk_rt(task)->scheduled_on = NO_CPU;
 	}
 
+	if (CRIT_LEVEL_A == tsk_mc_crit(task))
+		ce_task_exit(get_task_domain(task), task);
+
 	local_irq_restore(flags);
 }
 
@@ -797,7 +774,22 @@ static long mc_activate_plugin(void)
 	if (interrupt_cpu == NO_CPU)
 		interrupt_cpu = 0;
 #endif
-	return 0;
+	return ce_activate_plugin();
+}
+
+/*
+ * This is the plugin's release_at function, called by the release task-set
+ * system call. Other places in this file use the generic LITMUS release_at(),
+ * which is not this.
+ */
+void mc_release_at(struct task_struct *ts, lt_t start)
+{
+	ce_start(ts, start);
+}
+
+long mc_deactivate_plugin(void)
+{
+	return ce_deactivate_plugin();
 }
 
 /* **************************************************************************
@@ -820,11 +812,6 @@ static rt_domain_t _mc_crit_c_rt;
 struct bheap _mc_heap_c;
 struct bheap_node _mc_nodes_c[NR_CPUS];
 
-/*
- * XXX commented out because I think this was an obvious typo
- */
-/* release_at)_ */
-
 static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
 	.plugin_name		= "MC",
 	.task_new		= mc_task_new,
@@ -835,6 +822,8 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
 	.task_block		= mc_task_block,
 	.admit_task		= mc_admit_task,
 	.activate_plugin	= mc_activate_plugin,
+	.release_at		= mc_release_at,
+	.deactivate_plugin	= mc_deactivate_plugin,
 };
 
 static void init_crit_entry(crit_entry_t *ce, enum crit_level level,
@@ -888,12 +877,14 @@ static inline void init_edf_domain(domain_t *dom, rt_domain_t *rt)
 			edf_higher_prio);
 }
 
+domain_data_t *ce_domain_for(int);
 static int __init init_mc(void)
 {
 	int cpu;
 	cpu_entry_t *entry;
 	rt_domain_t *rt;
 	domain_data_t *dom_data;
+	domain_t *dom;
 	raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */
 
 	for_each_online_cpu(cpu) {
@@ -911,13 +902,17 @@ static int __init init_mc(void)
 #endif
 
 		/* CRIT_LEVEL_A */
-		dom_data = &per_cpu(_mc_crit_a, cpu);
-		rt = &per_cpu(_mc_crit_a_rt, cpu);
+		dom_data = ce_domain_for(cpu);
 		init_local_domain(entry, dom_data, CRIT_LEVEL_A);
-		init_edf_domain(&dom_data->domain, rt);
 		a_dom = dom_data->domain.lock;
 		raw_spin_lock_init(a_dom);
 		dom_data->domain.name = "LVL-A";
+		/* Hook up the level A functions */
+		dom = &dom_data->domain;
+		dom->requeue = ce_requeue;
+		dom->peek_ready = dom->take_ready = ce_peek_and_take_ready;
+		dom->higher_prio = ce_higher_prio;
+		dom->preempt_needed = mc_preempt_needed;
 
 		/* CRIT_LEVEL_B */
 		dom_data = &per_cpu(_mc_crit_b, cpu);
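
With the pointers installed in init_mc() above, generic MC code can
drive level A entirely through the domain_t interface. A sketch of the
indirection (the surrounding caller is hypothetical; the three callees
and their signatures are from litmus/ce_domain.c above):

	struct task_struct *t = dom->peek_ready(dom);	/* -> ce_peek_and_take_ready() */
	dom->requeue(dom, t);				/* -> ce_requeue() */
	int preempt = dom->higher_prio(t, other);	/* -> ce_higher_prio() */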
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index dcb74f4ca67b..8a0f9556640c 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -27,19 +27,20 @@
 static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
 
 #define is_active_plugin() (litmus == &mc_ce_plugin)
+#define get_ce_data(dom_data_ref) (dom_data_ref->domain.data)
 
 static atomic_t start_time_set = ATOMIC_INIT(-1);
 static atomic64_t start_time = ATOMIC64_INIT(0);
 static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
 
 
-DEFINE_PER_CPU(domain_t, mc_ce_doms);
+DEFINE_PER_CPU(domain_data_t, mc_ce_doms);
 DEFINE_PER_CPU(rt_domain_t, mc_ce_rts);
 DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
 
 /* Return the address of the domain_t for this CPU, used by the
  * mixed-criticality plugin. */
-domain_t *ce_domain_for(int cpu)
+domain_data_t *ce_domain_for(int cpu)
 {
 	return &per_cpu(mc_ce_doms, cpu);
 }
@@ -65,8 +66,8 @@ static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
  */
 static void mc_ce_job_completion(struct task_struct *ts)
 {
-	const domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	const struct ce_dom_data *ce_data = dom->data;
+	const domain_data_t *dom_data = &per_cpu(mc_ce_doms, smp_processor_id());
+	const struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id;
 	const struct ce_dom_pid_entry *pid_entry =
 		&ce_data->pid_entries[idx];
@@ -128,8 +129,9 @@ static int mc_ce_schedule_at(const domain_t *dom, lt_t offset)
 
 static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, smp_processor_id());
+	domain_t *dom = &dom_data->domain;
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	struct task_struct *next = NULL;
 	int exists, sleep, should_sched_exists, should_sched_blocked,
 		should_sched_asleep;
@@ -178,8 +180,8 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 
 static void mc_ce_finish_switch(struct task_struct *prev)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, smp_processor_id());
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 
 	TRACE("finish switch\n");
 
@@ -195,8 +197,9 @@ static void mc_ce_finish_switch(struct task_struct *prev)
  */
 static void mc_ce_tick(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, smp_processor_id());
+	domain_t *dom = &dom_data->domain;
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	struct task_struct *should_schedule;
 
 	if (is_realtime(ts) && CRIT_LEVEL_A == tsk_mc_crit(ts)) {
@@ -219,8 +222,8 @@ static void mc_ce_tick(struct task_struct *ts)
  */
 static long __mc_ce_admit_task(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, get_partition(ts));
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	struct mc_data *mcd = tsk_mc_data(ts);
 	struct pid *pid = NULL;
 	long retval = -EINVAL;
@@ -280,7 +283,8 @@ out:
 
 static long mc_ce_admit_task(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, get_partition(ts));
+	domain_t *dom = &dom_data->domain;
 	unsigned long flags, retval;
 	raw_spin_lock_irqsave(dom->lock, flags);
 	retval = __mc_ce_admit_task(ts);
@@ -295,8 +299,9 @@ static long mc_ce_admit_task(struct task_struct *ts)
  */
 static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, task_cpu(ts));
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, task_cpu(ts));
+	domain_t *dom = &dom_data->domain;
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	struct pid *pid_should_be_running;
 	struct ce_dom_pid_entry *pid_entry;
 	unsigned long flags;
@@ -341,8 +346,9 @@ static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
  */
 static void mc_ce_task_wake_up(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, smp_processor_id());
+	domain_t *dom = &dom_data->domain;
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	unsigned long flags;
 
 	TRACE_TASK(ts, "wake up\n");
@@ -366,10 +372,11 @@ static void mc_ce_task_block(struct task_struct *ts)
 /*
  * Called when a task switches from RT mode back to normal mode.
  */
-static void mc_ce_task_exit(struct task_struct *ts)
+void mc_ce_task_exit(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, get_partition(ts));
+	domain_t *dom = &dom_data->domain;
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	unsigned long flags;
 	struct pid *pid;
 	const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;
@@ -403,11 +410,13 @@ void __mc_ce_timer_callback(struct hrtimer *timer)
 	struct task_struct *should_schedule;
 	struct ce_dom_pid_entry *pid_entry;
 	struct ce_dom_data *ce_data;
+	domain_data_t *dom_data;
 	domain_t *dom;
 	int idx, budget_overrun;
 
 	ce_data = container_of(timer, struct ce_dom_data, timer);
-	dom = container_of(((void*)ce_data), domain_t, data);
+	dom_data = &per_cpu(mc_ce_doms, ce_data->cpu);
+	dom = &dom_data->domain;
 
 	/* Based off of the current time, figure out the offset into the cycle
 	 * and the cycle's start time, and determine what should be scheduled.
@@ -466,10 +475,12 @@ static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
 {
 	struct ce_dom_data *ce_data;
 	unsigned long flags;
+	domain_data_t *dom_data;
 	domain_t *dom;
 
 	ce_data = container_of(timer, struct ce_dom_data, timer);
-	dom = container_of(((void*)ce_data), domain_t, data);
+	dom_data = &per_cpu(mc_ce_doms, ce_data->cpu);
+	dom = &dom_data->domain;
 
 	TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
 
@@ -490,14 +501,14 @@ static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
 static int cancel_all_timers(void)
 {
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	domain_data_t *dom_data;
 	int cpu, ret = 0, cancel_res;
 
 	TRACE("cancel all timers\n");
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
+		dom_data = &per_cpu(mc_ce_doms, cpu);
+		ce_data = get_ce_data(dom_data);
 		ce_data->should_schedule = NULL;
 		cancel_res = hrtimer_cancel(&ce_data->timer);
 		atomic_set(&ce_data->timer_info.state,
@@ -515,15 +526,15 @@ static int cancel_all_timers(void)
 static void arm_all_timers(void)
 {
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	domain_data_t *dom_data;
 	int cpu, idx;
 	const lt_t start = atomic64_read(&start_time);
 
 	TRACE("arm all timers\n");
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
+		dom_data = &per_cpu(mc_ce_doms, cpu);
+		ce_data = get_ce_data(dom_data);
 		if (0 == ce_data->num_pid_entries)
 			continue;
 		for (idx = 0; idx < ce_data->num_pid_entries; idx++) {
@@ -555,12 +566,12 @@ void mc_ce_release_at(struct task_struct *ts, lt_t start)
 long mc_ce_activate_plugin(void)
 {
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	domain_data_t *dom_data;
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
+		dom_data = &per_cpu(mc_ce_doms, cpu);
+		ce_data = get_ce_data(dom_data);
 		ce_data->scheduled = NULL;
 		ce_data->should_schedule = NULL;
 	}
@@ -575,12 +586,12 @@ long mc_ce_activate_plugin(void)
 static void clear_pid_entries(void)
 {
 	int cpu, entry;
-	domain_t *dom;
+	domain_data_t *dom_data;
 	struct ce_dom_data *ce_data;
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
+		dom_data = &per_cpu(mc_ce_doms, cpu);
+		ce_data = get_ce_data(dom_data);
 		ce_data->num_pid_entries = 0;
 		ce_data->cycle_time = 0;
 		for (entry = 0; entry < CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS;
@@ -623,16 +634,18 @@ static int setup_proc(void);
 static int __init init_sched_mc_ce(void)
 {
 	struct ce_dom_data *ce_data;
+	domain_data_t *dom_data;
 	domain_t *dom;
 	rt_domain_t *rt;
 	int cpu, err;
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
+		dom_data = &per_cpu(mc_ce_doms, cpu);
+		dom = &dom_data->domain;
 		rt = &per_cpu(mc_ce_rts, cpu);
 		pd_domain_init(dom, rt, NULL, NULL, NULL, NULL, NULL);
 		dom->data = &per_cpu(_mc_ce_dom_data, cpu);
-		ce_data = dom->data;
+		ce_data = get_ce_data(dom_data);
 		hrtimer_init(&ce_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 		hrtimer_start_on_info_init(&ce_data->timer_info);
 		ce_data->cpu = cpu;
@@ -736,7 +749,7 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
 {
 	int n = 0, err, cpu, t;
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	domain_data_t *dom_data;
 
 	if (off > 0) {
 		printk(KERN_INFO "litmus: MC-CE called read with off > 0\n");
@@ -744,8 +757,8 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
 	}
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
+		dom_data = &per_cpu(mc_ce_doms, cpu);
+		ce_data = get_ce_data(dom_data);
 		for (t = 0; t < ce_data->num_pid_entries; ++t) {
 			err = write_pid_entry(page + n, count - n,
 					cpu, t, &ce_data->pid_entries[t]);
@@ -785,8 +798,8 @@ static int skip_comment(const char *buf, const unsigned long max)
 #define BUDGET_THRESHOLD 5000000ULL
 static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, cpu);
-	struct ce_dom_data *ce_data = dom->data;
+	domain_data_t *dom_data = &per_cpu(mc_ce_doms, cpu);
+	struct ce_dom_data *ce_data = get_ce_data(dom_data);
 	struct ce_dom_pid_entry *new_entry;
 	int err = 0;
 
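
One note on the timer callbacks changed above: the old code tried to
recover the domain with container_of(((void*)ce_data), domain_t, data),
but ce_dom_data is never embedded in a domain_t; domain_t->data merely
points at the separate per-CPU _mc_ce_dom_data variable (see
init_sched_mc_ce()), so the computed address was bogus. Both callbacks
now look the wrapper up by CPU id instead:

	ce_data = container_of(timer, struct ce_dom_data, timer);  /* valid: timer is embedded */
	dom_data = &per_cpu(mc_ce_doms, ce_data->cpu);             /* per-CPU lookup, not container_of */
	dom = &dom_data->domain;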