author    Jonathan Herman <hermanjl@cs.unc.edu>    2011-09-21 18:25:30 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>    2011-09-21 18:25:30 -0400
commit    d27d5ce8b5fcd8408e995d13608d8993cb720aab (patch)
tree      99f3163bc76c74234c9001a9388d91afb8584ab2
parent    313bcb226f88d17b193f9e7db7ecb4f57320a596 (diff)
Now with fine grained locking
-rw-r--r--  include/litmus/rt_domain.h  |   3
-rw-r--r--  include/litmus/sched_mc.h   |   3
-rw-r--r--  litmus/litmus.c             |  10
-rw-r--r--  litmus/rt_domain.c          |  14
-rw-r--r--  litmus/sched_mc.c           | 526
-rw-r--r--  litmus/sched_mc_ce.c        |   5
6 files changed, 311 insertions, 250 deletions
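The commit replaces the plugin's single global_lock with finer-grained locking: each domain carries its own ready-queue lock (dom->lock) and each cpu_entry_t gains a raw_spinlock_t, with paths such as check_for_preempt() taking the domain lock first and the per-CPU entry lock second. The following user-space sketch only illustrates that nesting order; the struct layout, function name, and pthread mutexes are stand-ins for the kernel's raw spinlocks, not the LITMUS^RT code itself.

/* Illustrative sketch of the lock order introduced by this commit:
 * domain lock (outer) before per-CPU entry lock (inner). Names and
 * types here are hypothetical; pthread mutexes model raw spinlocks. */
#include <pthread.h>
#include <stdio.h>

struct cpu_entry {
        int cpu;
        pthread_mutex_t lock;        /* plays the role of cpu_entry_t->lock */
};

struct domain {
        const char *name;
        pthread_mutex_t lock;        /* plays the role of dom->lock */
        struct cpu_entry *target;    /* CPU this domain may preempt */
};

/* Same ordering as check_for_preempt(): domain lock, then entry lock. */
static void check_for_preempt_sketch(struct domain *dom)
{
        pthread_mutex_lock(&dom->lock);          /* outer: domain state */
        pthread_mutex_lock(&dom->target->lock);  /* inner: per-CPU state */
        printf("%s may preempt P%d\n", dom->name, dom->target->cpu);
        pthread_mutex_unlock(&dom->target->lock);
        pthread_mutex_unlock(&dom->lock);
}

int main(void)
{
        struct cpu_entry e = { .cpu = 0, .lock = PTHREAD_MUTEX_INITIALIZER };
        struct domain d = { .name = "LVL-C",
                            .lock = PTHREAD_MUTEX_INITIALIZER,
                            .target = &e };
        check_for_preempt_sketch(&d);
        return 0;
}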
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index b5cf95ffd488..0e4e75cd1e67 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -80,14 +80,13 @@ void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
                     release_jobs_t release);
 
 void pd_domain_init(domain_t *dom,
+                    rt_domain_t *rt,
                     bheap_prio_t order,
                     check_resched_needed_t check,
                     release_jobs_t release,
                     preempt_needed_t preempt_needed,
                     task_prio_t priority);
 
-void pd_domain_free(domain_t *dom);
-
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
 void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 32ef68150c81..d29796298701 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -9,8 +9,7 @@ enum crit_level {
         CRIT_LEVEL_A = 0,
         CRIT_LEVEL_B = 1,
         CRIT_LEVEL_C = 2,
-        CRIT_LEVEL_D = 3,
-        NUM_CRIT_LEVELS = 4,
+        NUM_CRIT_LEVELS = 3,
 };
 
 struct mc_task {
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 7db9fdadc7db..437cdfa215ce 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -315,7 +315,7 @@ asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param
         }
 
         /* check parameters passed in are valid */
-        if (mc.crit < CRIT_LEVEL_A || mc.crit > CRIT_LEVEL_D)
+        if (mc.crit < CRIT_LEVEL_A || mc.crit >= NUM_CRIT_LEVELS)
         {
                 printk(KERN_WARNING "litmus: real-time task %d rejected because "
                         "of invalid criticality level\n", pid);
@@ -562,10 +562,10 @@ void exit_litmus(struct task_struct *dead_tsk)
         }
 
 #ifdef CONFIG_PLUGIN_MC
         /* The MC-setup syscall might succeed and allocate mc_data, but the
-           task may not exit in real-time mode, and that memory will leak.
-           Check and free it here.
+         * task may not exit in real-time mode, and that memory will leak.
+         * Check and free it here.
          */
         if (tsk_rt(dead_tsk)->mc_data)
                 kfree(tsk_rt(dead_tsk)->mc_data);
 #endif
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 65c6e5c02f1e..3e419d7c9ae7 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -20,7 +20,7 @@
 
 
 /* Uncomment when debugging timer races... */
-#if 1
+#if 0
 #define VTRACE_TASK TRACE_TASK
 #define VTRACE TRACE
 #else
@@ -371,8 +371,12 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
 
         if (is_released(task, litmus_clock())) {
                 __add_ready(domain, task);
+                TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
+                      task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
+                      get_release(task), litmus_clock());
         } else {
                 /* task has to wait for next release */
+                TRACE_TASK(task, "add release(), rel=%llu\n", get_release(task));
                 add_release(domain, task);
         }
 }
@@ -398,22 +402,16 @@ static struct task_struct* pd_peek_ready(domain_t *dom)
 /* pd_domain_init - create a generic domain wrapper for an rt_domain
  */
 void pd_domain_init(domain_t *dom,
+                    rt_domain_t *domain,
                     bheap_prio_t order,
                     check_resched_needed_t check,
                     release_jobs_t release,
                     preempt_needed_t preempt_needed,
                     task_prio_t priority)
 {
-        rt_domain_t *domain = kmalloc(sizeof(rt_domain_t), GFP_ATOMIC);
-
         rt_domain_init(domain, order, check, release);
         domain_init(dom, &domain->ready_lock,
                     pd_requeue, pd_peek_ready, pd_take_ready,
                     preempt_needed, priority);
         dom->data = domain;
 }
-
-void pd_domain_free(domain_t *dom)
-{
-        kfree(dom->data);
-}
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 0f6c052aad20..1f2b13c6a219 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -16,6 +16,7 @@
 #include <linux/hrtimer.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/poison.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -27,30 +28,50 @@
 
 #include <litmus/sched_mc.h>
 
-/* Per CPU per criticality level state */
+/**
+ * crit_entry_t - State of a CPU within each criticality level system.
+ * @level   Criticality level of this entry
+ * @linked  Logically running task, ghost or regular
+ * @domain  Domain from which to draw tasks
+ * @usable  False if a higher criticality task is running
+ * @timer   For ghost task budget enforcement
+ * @node    Used to sort crit_entries by preemptability in global domains
+ */
 typedef struct {
         enum crit_level level;
-        struct task_struct* linked; /* Logically running task */
+        struct task_struct* linked;
         domain_t* domain;
+        int usable;
+        struct hrtimer timer;
+        struct bheap_node* node;
+} crit_entry_t;
 
-        struct hrtimer timer; /* For ghost task budget enforcement */
-        struct bheap_node* node; /* For membership in global domains */
-} crit_cpu_entry_t;
-
-/* Per CPU state */
+/**
+ * cpu_entry_t - State of a CPU for the entire MC system
+ * @cpu          CPU id
+ * @scheduled    Task that is physically running
+ * @linked       Task that should be running / is logically running
+ * @lock         For serialization
+ * @crit_entries Array of CPU state per criticality level
+ */
 typedef struct {
         int cpu;
-        struct task_struct* scheduled; /* Task that is physically running */
-        struct task_struct* linked; /* Task that is logically running */
-
-        crit_cpu_entry_t crit_entries[NUM_CRIT_LEVELS];
+        struct task_struct* scheduled;
+        struct task_struct* linked;
+        raw_spinlock_t lock;
+        crit_entry_t crit_entries[NUM_CRIT_LEVELS];
 } cpu_entry_t;
 
-/* Wrapper necessary until cpu linking code is moved into header file */
-typedef struct domain_data {
-        domain_t domain;
-        struct bheap* heap; /* For global domains */
-        crit_cpu_entry_t* crit_entry; /* For partitioned domains */
+/**
+ * domain_data_t - Wrap domains with related CPU state
+ * @domain     A domain for a criticality level
+ * @heap       The preemptable heap of crit entries (for global domains)
+ * @crit_entry The crit entry for this domain (for partitioned domains)
+ */
+typedef struct {
+        domain_t domain;
+        struct bheap* heap;
+        crit_entry_t* crit_entry;
 } domain_data_t;
 
 static cpu_entry_t* cpus[NR_CPUS];
@@ -59,9 +80,14 @@ static raw_spinlock_t global_lock;
 #define domain_data(dom) (container_of(dom, domain_data_t, domain))
 #define is_global(dom) (domain_data(dom)->heap)
 #define is_global_task(t) (is_global(get_task_domain(t)))
+#define can_requeue(t) \
+        (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU)
+#define entry_level(e) \
+        (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
 #define crit_cpu(ce) \
-        (container_of((void*)(ce - ce->level), cpu_entry_t, crit_entries))
+        (container_of((void*)((ce) - (ce)->level), cpu_entry_t, crit_entries))
 
+/* useful debug macros */
 #define TS "(%s/%d:%d:%s)"
 #define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", (t) ? t->pid : 1, \
         (t) ? t->rt_param.job_params.job_no : 1, \
@@ -71,7 +97,8 @@ static raw_spinlock_t global_lock;
               e->cpu, TA(e->linked), ##args)
 #define TRACE_CRIT_ENTRY(ce, fmt, args...) \
         TRACE("%s P%d, linked=" TS " " fmt "\n", \
-              ce->domain->name, crit_cpu(ce)->cpu, TA(ce->linked), ##args)
+              (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args)
+#undef TRACE_TASK
 #define TRACE_TASK(t, fmt, args...) \
         TRACE(TS " " fmt "\n", TA(t), ##args)
 
@@ -81,7 +108,7 @@ static raw_spinlock_t global_lock;
 static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 {
         domain_t *domain;
-        crit_cpu_entry_t *first, *second;
+        crit_entry_t *first, *second;
         struct task_struct *first_link, *second_link;
 
         first = a->value;
@@ -89,7 +116,9 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
         first_link = first->linked;
         second_link = second->linked;
 
-        if (!first_link || !second_link) {
+        if (!first->usable || !second->usable) {
+                return second->usable && first->usable;
+        } else if (!first_link || !second_link) {
                 return second_link && !first_link;
         } else {
                 domain = get_task_domain(first_link);
@@ -99,10 +128,26 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 }
 
 /*
+ * Return true if the domain has a higher priority ready task. The curr
+ * task must belong to the domain.
+ */
+static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
+{
+        struct task_struct *next = dom->peek_ready(dom);
+
+        if (!next || !curr) {
+                return next && !curr;
+        } else {
+                BUG_ON(tsk_mc_crit(next) != tsk_mc_crit(curr));
+                return get_task_domain(next)->higher_prio(next, curr);
+        }
+}
+
+/*
  * Return next CPU which should preempted or NULL if the domain has no
  * preemptable CPUs.
  */
-static inline crit_cpu_entry_t* lowest_prio_cpu(domain_t *dom)
+static inline crit_entry_t* lowest_prio_cpu(domain_t *dom)
 {
         struct bheap *heap = domain_data(dom)->heap;
         struct bheap_node* hn;
@@ -110,8 +155,9 @@ static inline crit_cpu_entry_t* lowest_prio_cpu(domain_t *dom)
         return (hn) ? hn->value : NULL;
 }
 
-/*
- * Time accounting for ghost tasks. Called during ticks and linking.
+/**
+ * update_ghost_time() - Time accounting for ghost tasks.
+ * Must be called before a decision is made involving the task's budget.
  */
 static void update_ghost_time(struct task_struct *p)
 {
@@ -135,16 +181,18 @@ static void update_ghost_time(struct task_struct *p)
         }
 }
 
-/*
- * Logically set running task for a domain on a CPU.
+/**
+ * link_task_to_crit() - Logically run a task at a criticality level.
+ * Caller must hold @ce's domain's lock.
  */
-static void link_task_to_crit(crit_cpu_entry_t *ce,
+static void link_task_to_crit(crit_entry_t *ce,
                               struct task_struct *task)
 {
         lt_t when_to_fire;
         struct bheap *heap;
 
-        TRACE_TASK(task, "Linking to P%d", crit_cpu(ce)->cpu);
+        TRACE_CRIT_ENTRY(ce, "Linking " TS, TA(task));
+        BUG_ON(!ce->usable && task);
         BUG_ON(task && tsk_rt(task)->linked_on != NO_CPU);
         BUG_ON(task && is_global(ce->domain) &&
                !bheap_node_in_heap(ce->node));
@@ -180,7 +228,7 @@ static void link_task_to_crit(crit_cpu_entry_t *ce,
         }
 
         /* Update global heap node position */
-        if (is_global(ce->domain) && bheap_node_in_heap(ce->node)) {
+        if (is_global(ce->domain)) {
                 heap = domain_data(ce->domain)->heap;
                 bheap_delete(cpu_lower_prio, heap, ce->node);
                 bheap_insert(cpu_lower_prio, heap, ce->node);
@@ -189,18 +237,19 @@ static void link_task_to_crit(crit_cpu_entry_t *ce,
 
 static void check_for_preempt(domain_t*);
 
-/*
- * Catch all function for when a task enters the system after a suspension
- * or a release. Requeues the task and causes a preemption, if necessary.
+/**
+ * job_arrival() - Called when a task re-enters the system.
+ * Caller must hold no locks.
  */
-static void job_arrival(struct task_struct* task)
+static void job_arrival(struct task_struct *task)
 {
         domain_t *dom = get_task_domain(task);
 
         TRACE_TASK(task, "Job arriving");
         BUG_ON(!task);
 
-        if (!is_global(dom) || tsk_rt(task)->scheduled_on == NO_CPU) {
+        raw_spin_lock(dom->lock);
+        if (can_requeue(task)) {
                 dom->requeue(dom, task);
                 check_for_preempt(dom);
         } else {
@@ -212,86 +261,51 @@ static void job_arrival(struct task_struct* task)
                  */
                 TRACE_TASK(task, "Delayed arrival of scheduled task");
         }
+        raw_spin_unlock(dom->lock);
 }
 
-/*
- * Logically run a task on a CPU. The task must first have been linked
- * to one of the criticalities running on this CPU.
+/**
+ * link_task_to_cpu() - Logically run a task on a CPU.
+ * The task must first have been linked to one of the CPU's crit_entries.
+ * Caller must hold the entry lock.
  */
 static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task)
 {
-        int i, in_heap;
-        crit_cpu_entry_t *ce;
-        struct bheap *heap;
-        struct task_struct *tmp;
-        enum crit_level last, next;
-
-        next = (task) ? tsk_mc_crit(task) : NUM_CRIT_LEVELS - 1;
-        last = (entry->linked) ? tsk_mc_crit(entry->linked) :
-                NUM_CRIT_LEVELS - 1;
-
+        int i;
         TRACE_TASK(task, "Linking to P%d", entry->cpu);
         BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
         BUG_ON(task && is_ghost(task));
-        BUG_ON(entry->linked && task && tsk_mc_crit(entry->linked) < next);
 
-        /* Actually link task */
-        if (task && !is_ghost(task)) {
+        i = entry_level(entry);
+        if (task){
                 set_rt_flags(task, RT_F_RUNNING);
-                entry->linked = task;
-        } else {
-                entry->linked = NULL;
         }
-
-        /* Update CPU states */
-        for (i = ((next < last) ? next : last);
-             i <= ((next > last) ? next : last); i++) {
-                ce = &entry->crit_entries[i];
-
-                /* Put CPU only in heaps which can preempt the linked task */
-                if (is_global(ce->domain)) {
-                        heap = domain_data(ce->domain)->heap;
-                        in_heap = bheap_node_in_heap(ce->node);
-                        if (ce->level > next && in_heap) {
-                                bheap_delete(cpu_lower_prio, heap, ce->node);
-                        } else if ((ce->level < next || !task) && !in_heap) {
-                                bheap_insert(cpu_lower_prio, heap, ce->node);
-                        }
-                }
-
-                /* Remove and requeue lower priority tasks on this CPU */
-                if (ce->linked && ce->level > next) {
-                        TRACE_TASK(ce->linked, "Removed by higher priority");
-                        tmp = ce->linked;
-                        link_task_to_crit(ce, NULL);
-                        if (is_global(ce->domain)) {
-                                /* Need to check for a preemption.
-                                 * We know this CPU is no longer in the heap
-                                 * so it cannot get re-preempted here.
-                                 */
-                                job_arrival(tmp);
-                        } else {
-                                ce->domain->requeue(ce->domain, tmp);
-                        }
-                }
+        entry->linked = task;
+        for (; i < entry_level(entry) + 1; i++) {
+                TRACE_CRIT_ENTRY(&entry->crit_entries[i], "now usable");
+                entry->crit_entries[i].usable = 1;
         }
 }
 
-/*
- * Preempt logically running task in a domain. If the preempting task should be
- * running on the domain's CPU, also links the task to the CPU and causes
- * a physical preemption.
+/**
+ * preempt() - Preempt a logically running task with a higher priority one.
+ * @dom Domain from which to draw higher priority task
+ * @ce  CPU criticality level to preempt
+ *
+ * Caller must hold the lock for @dom and @ce's CPU lock. Returns 1 if
+ * a physical preemption occurred.
  */
-static void preempt(domain_t *dom, crit_cpu_entry_t *ce)
+static int preempt(domain_t *dom, crit_entry_t *ce)
 {
+        int rv = 0;
         struct task_struct *task = dom->take_ready(dom);
         cpu_entry_t *entry = crit_cpu(ce);
 
-        TRACE_CRIT_ENTRY(ce, "Preempted by " TS, TA(task));
         BUG_ON(!task);
+        TRACE_CRIT_ENTRY(ce, "Preempted by " TS, TA(task));
 
         /* Per-domain preemption */
-        if (ce->linked) {
+        if (ce->linked && can_requeue(ce->linked)) {
                 dom->requeue(dom, ce->linked);
         }
         link_task_to_crit(ce, task);
@@ -300,65 +314,119 @@ static void preempt(domain_t *dom, crit_cpu_entry_t *ce)
         if (!is_ghost(task)) {
                 link_task_to_cpu(entry, task);
                 preempt_if_preemptable(entry->scheduled, entry->cpu);
+                rv = 1;
         }
+        return rv;
 }
 
-/*
- * Causes a logical preemption if the domain has a higher-priority ready task.
+/**
+ * update_crit_levels() - Update criticality entries for the new cpu state.
+ * This should be called after a new task has been linked to @entry.
+ * Assumes the caller holds @entry->lock, but this method will release it.
+ */
+static void update_crit_levels(cpu_entry_t *entry)
+{
+        int i;
+        crit_entry_t *ce;
+        struct task_struct *tasks[NUM_CRIT_LEVELS];
+        enum crit_level level = entry_level(entry);
+
+        /* Remove tasks from entries */
+        for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
+                ce = &entry->crit_entries[i];
+                tasks[i] = ce->linked;
+                TRACE_CRIT_ENTRY(ce, "not usable");
+                ce->usable = 0;
+                if (ce->linked) {
+                        link_task_to_crit(ce, NULL);
+                }
+        }
+
+        raw_spin_unlock(&entry->lock);
+
+        /* Put tasks back into system */
+        for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
+                ce = &entry->crit_entries[i];
+                if (tasks[i]) {
+                        job_arrival(tasks[i]);
+                }
+        }
+}
+
+/**
+ * check_for_preempt() - Causes a preemption if higher-priority tasks are ready.
+ * Caller must hold domain lock.
+ * Makes gigantic nasty assumption that there is 1 global criticality level,
+ * and it is the last one in each list, so it doesn't call update_crit..
  */
 static void check_for_preempt(domain_t *dom)
 {
-        int lower_prio;
+        int preempted = 1;
         cpu_entry_t *entry;
-        crit_cpu_entry_t *ce;
+        crit_entry_t *ce;
 
         if (is_global(dom)) {
-                /* If a higher priority task is running on a CPU,
-                 * it will not be present in the domain heap.
-                 */
-                for (ce = lowest_prio_cpu(dom);
-                     ce && dom->preempt_needed(dom, ce->linked);
-                     ce = lowest_prio_cpu(dom)) {
-
-                        preempt(dom, ce);
+                /* Loop until we find a non-preemptable CPU */
+                while ((ce = lowest_prio_cpu(dom)) && preempted) {
+                        entry = crit_cpu(ce);
+                        preempted = 0;
+                        raw_spin_lock(&entry->lock);
+                        if (ce->usable && dom->preempt_needed(dom, ce->linked)){
+                                preempted = 1;
+                                preempt(dom, ce);
+                        }
+                        raw_spin_unlock(&entry->lock);
                 }
         } else /* Partitioned */ {
                 ce = domain_data(dom)->crit_entry;
                 entry = crit_cpu(ce);
-                /* A higher priority task might be running, in which case
-                 * this level cannot link any task.
-                 */
-                lower_prio = entry->linked &&
-                        tsk_mc_crit(entry->linked) < ce->level;
-                if (!lower_prio && dom->preempt_needed(dom, ce->linked)) {
+
+                raw_spin_lock(&entry->lock);
+                if (ce->usable && dom->preempt_needed(dom, ce->linked)) {
                         preempt(dom, ce);
+                        update_crit_levels(entry);
+                } else {
+                        raw_spin_unlock(&entry->lock);
                 }
         }
 }
 
-/*
- * Remove a running task from all structures.
+/**
+ * remove_from_all() - Logically remove a task from all structures.
+ * Caller must hold no locks.
  */
 static void remove_from_all(struct task_struct* task)
 {
-        int cpu, level;
+        int update = 0;
         cpu_entry_t *entry;
-        crit_cpu_entry_t *ce;
+        crit_entry_t *ce;
+        domain_t *dom = get_task_domain(task);
 
         TRACE_TASK(task, "Removing from everything");
         BUG_ON(!task);
 
-        cpu = task->rt_param.linked_on;
-        level = tsk_mc_crit(task);
-        if (cpu != NO_CPU) {
-                /* Unlink */
-                entry = cpus[cpu];
-                ce = &entry->crit_entries[level];
-                link_task_to_crit(ce, NULL);
-                if (!is_ghost(task)) {
-                        link_task_to_cpu(entry, NULL);
+        raw_spin_lock(dom->lock);
+        if (task->rt_param.linked_on != NO_CPU) {
+                entry = cpus[task->rt_param.linked_on];
+
+                raw_spin_lock(&entry->lock);
+                /* Unlink if task is still linked post lock */
+                ce = &entry->crit_entries[tsk_mc_crit(task)];
+                if (task->rt_param.linked_on != NO_CPU) {
+                        BUG_ON(entry->linked != task);
+                        link_task_to_crit(ce, NULL);
+                        if (!is_ghost(task)) {
+                                update = 1;
+                                link_task_to_cpu(entry, NULL);
+                        }
                 }
                 BUG_ON(is_queued(task));
+
+                if (update) {
+                        update_crit_levels(entry);
+                } else {
+                        raw_spin_unlock(&entry->lock);
+                }
         } else if (is_queued(task)) {
                 /* This is an interesting situation: t is scheduled,
                  * but was just recently unlinked. It cannot be
@@ -367,14 +435,15 @@ static void remove_from_all(struct task_struct* task)
                  * queue. We must remove it from the list in this
                  * case.
                  */
-                TRACE_TASK(task, "Weird is_queued situation happened");
                 remove((rt_domain_t*)get_task_domain(task)->data, task);
         }
+        raw_spin_unlock(dom->lock);
 }
 
-/*
- * Prepares a task for its next period and causes a preemption, if necessary.
- * Converts tasks which completed their execution early into ghost tasks.
+/**
+ * job_completion() - Update task state and re-enter it into the system.
+ * Converts tasks which have completed their execution early into ghost jobs.
+ * Caller must hold no locks.
  */
 static void job_completion(struct task_struct *task, int forced)
 {
@@ -409,88 +478,63 @@ static void job_completion(struct task_struct *task, int forced)
         job_arrival(task);
 }
 
-/*
- * Return true if the domain has a higher priority ready task. The curr
- * task must belong to the domain.
- */
-static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
-{
-        struct task_struct *next = dom->peek_ready(dom);
-
-        if (!next || !curr) {
-                return next && !curr;
-        } else {
-                BUG_ON(tsk_mc_crit(next) != tsk_mc_crit(curr));
-                return get_task_domain(next)->higher_prio(next, curr);
-        }
-}
-
-/*
- * Completes a logically (but not physically) running ghost task.
+/**
+ * mc_ghost_exhausted() - Complete logically running ghost task.
  */
 static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 {
         unsigned long flags;
-        crit_cpu_entry_t *ce;
+        crit_entry_t *ce;
+        struct task_struct *tmp = NULL;
 
-        raw_spin_lock_irqsave(&global_lock, flags);
+        local_irq_save(flags);
 
-        ce = container_of(timer, crit_cpu_entry_t, timer);
+        ce = container_of(timer, crit_entry_t, timer);
         TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing");
 
         /* Due to race conditions, we cannot just set the linked
          * task's budget to 0 as it may no longer be the task
-         * for which this timer was armed.
+         * for which this timer was armed. Instead, update the running
+         * task time
          */
+        raw_spin_lock(&crit_cpu(ce)->lock);
         if (ce->linked && is_ghost(ce->linked)) {
                 update_ghost_time(ce->linked);
                 if (tsk_mc_data(ce->linked)->mc_job.ghost_budget == 0) {
-                        job_completion(ce->linked, 0);
-                        goto out;
+                        tmp = ce->linked;
+                        link_task_to_crit(ce, NULL);
                 }
         }
+        raw_spin_unlock(&crit_cpu(ce)->lock);
 
-        TRACE_TASK(ce->linked, "Was not exhausted");
- out:
-        raw_spin_unlock_irqrestore(&global_lock, flags);
+        if (tmp)
+                job_completion(tmp, 0);
+
+        local_irq_restore(flags);
         return HRTIMER_NORESTART;
 }
 
-/*
- * Adds released jobs to a domain and causes a preemption, if necessary.
+/**
+ * mc_release_jobs() - Add heap of tasks to the system, check for preemptions.
  */
 static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
         unsigned long flags;
-        struct task_struct *first;
+        struct task_struct *first = bheap_peek(rt->order, tasks)->value;
+        domain_t *dom = get_task_domain(first);
 
-        raw_spin_lock_irqsave(&global_lock, flags);
+        raw_spin_lock_irqsave(dom->lock, flags);
 
-        first = bheap_peek(rt->order, tasks)->value;
         TRACE_TASK(first, "Jobs released");
-
         __merge_ready(rt, tasks);
-        check_for_preempt(get_task_domain(first));
-
-        raw_spin_unlock_irqrestore(&global_lock, flags);
-}
+        check_for_preempt(dom);
 
-/*
- * Ghost time accounting.
- * TODO: remove
- */
-static void mc_tick(struct task_struct* t)
-{
-        unsigned long flags;
-        if (is_realtime(t) && is_ghost(t)) {
-                raw_spin_lock_irqsave(&global_lock, flags);
-                update_ghost_time(t);
-                raw_spin_unlock_irqrestore(&global_lock, flags);
-        }
+        raw_spin_unlock_irqrestore(dom->lock, flags);
 }
 
-/*
- * Setup new mixed-criticality task.
+/**
+ * mc_task_new() - Setup new mixed-criticality task.
+ * Assumes that there are no partitioned domains after level B.
  */
 static void mc_task_new(struct task_struct *t, int on_rq, int running)
 {
@@ -500,7 +544,7 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 
         TRACE("New mixed criticality task %d\n", t->pid);
 
-        raw_spin_lock_irqsave(&global_lock, flags);
+        local_irq_save(flags);
 
         /* Assign domain */
         level = tsk_mc_crit(t);
@@ -509,14 +553,12 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
         } else {
                 entry = cpus[task_cpu(t)];
         }
-        level = tsk_mc_crit(t);
         t->rt_param._domain = entry->crit_entries[level].domain;
 
         /* Setup job params */
         release_at(t, litmus_clock());
         tsk_mc_data(t)->mc_job.ghost_budget = 0;
         tsk_mc_data(t)->mc_job.is_ghost = 0;
-
         if (running) {
                 BUG_ON(entry->scheduled);
                 entry->scheduled = t;
@@ -528,18 +570,18 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 
         job_arrival(t);
 
-        raw_spin_unlock_irqrestore(&global_lock, flags);
+        local_irq_restore(flags);
 }
 
-/*
- * Add task back into its domain and cause any necessary preemptions.
+/**
+ * mc_task_wake_up() - Add task back into its domain and check for preemptions.
  */
 static void mc_task_wake_up(struct task_struct *task)
 {
         unsigned long flags;
         lt_t now;
 
-        raw_spin_lock_irqsave(&global_lock, flags);
+        local_irq_save(flags);
         TRACE_TASK(task, "Wakes up");
 
         now = litmus_clock();
@@ -548,49 +590,46 @@ static void mc_task_wake_up(struct task_struct *task)
                 release_at(task, now);
                 sched_trace_task_release(task);
         }
-
         if (!is_ghost(task))
                 job_arrival(task);
 
-        raw_spin_unlock_irqrestore(&global_lock, flags);
+        local_irq_restore(flags);
 }
 
-/*
- * Remove task from global state to prevent it from being linked / run
- * on any CPU.
+/**
+ * mc_task_block() - Remove task from state to prevent it being run anywhere.
  */
 static void mc_task_block(struct task_struct *task)
 {
         unsigned long flags;
-        raw_spin_lock_irqsave(&global_lock, flags);
+        local_irq_save(flags);
         TRACE_TASK(task, "Block at %llu", litmus_clock());
-
         remove_from_all(task);
-
-        raw_spin_unlock_irqrestore(&global_lock, flags);
+        local_irq_restore(flags);
 }
 
-/*
- * Remove task from the system.
+/**
+ * mc_task_exit() - Remove task from the system.
  */
 static void mc_task_exit(struct task_struct *task)
 {
         unsigned long flags;
-
+        local_irq_save(flags);
         BUG_ON(!is_realtime(task));
         TRACE_TASK(task, "RIP");
 
-        raw_spin_lock_irqsave(&global_lock, flags);
         remove_from_all(task);
         if (tsk_rt(task)->scheduled_on != NO_CPU) {
                 cpus[tsk_rt(task)->scheduled_on]->scheduled = NULL;
                 tsk_rt(task)->scheduled_on = NO_CPU;
         }
-        raw_spin_unlock_irqrestore(&global_lock, flags);
+
+        local_irq_restore(flags);
 }
 
-/*
- * Return true if the task is a valid mixed-criticality task.
+/**
+ * mc_admit_task() - Return true if the task is valid.
+ * Assumes there are no partitioned levels after level B.
  */
 static long mc_admit_task(struct task_struct* task)
 {
@@ -614,13 +653,14 @@ static long mc_admit_task(struct task_struct* task)
  */
 static struct task_struct* mc_schedule(struct task_struct * prev)
 {
+        unsigned long flags;
         domain_t *dom;
-        crit_cpu_entry_t *ce;
+        crit_entry_t *ce;
         cpu_entry_t* entry = cpus[smp_processor_id()];
         int i, out_of_time, sleep, preempt, exists, blocks, global;
         struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
-        raw_spin_lock(&global_lock);
+        local_irq_save(flags);
 
         /* Sanity checking */
         BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -660,19 +700,29 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
                 job_arrival(entry->scheduled);
 
         /* Pick next task if none is linked */
-        if (!entry->linked) {
-                for (i = 0; i < NUM_CRIT_LEVELS && !ready_task; i++) {
-                        ce = &entry->crit_entries[i];
-                        dom = ce->domain;
-                        dtask = dom->peek_ready(dom);
-                        if (!ce->linked && dtask) {
-                                dom->take_ready(dom);
-                                link_task_to_crit(ce, dtask);
-                                ready_task = (is_ghost(dtask)) ? NULL : dtask;
+        raw_spin_lock(&entry->lock);
+        for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
+                ce = &entry->crit_entries[i];
+                dom = ce->domain;
+
+                raw_spin_unlock(&entry->lock);
+                raw_spin_lock(dom->lock);
+                raw_spin_lock(&entry->lock);
+
+                dtask = dom->peek_ready(dom);
+                if (!entry->linked && ce->usable && !ce->linked && dtask) {
+                        dom->take_ready(dom);
+                        link_task_to_crit(ce, dtask);
+                        ready_task = (is_ghost(dtask)) ? NULL : dtask;
+                        if (ready_task) {
+                                link_task_to_cpu(entry, ready_task);
+                                raw_spin_unlock(dom->lock);
+                                update_crit_levels(entry);
+                                raw_spin_lock(&entry->lock);
+                                continue;
                         }
                 }
-                if (ready_task)
-                        link_task_to_cpu(entry, ready_task);
+                raw_spin_unlock(dom->lock);
         }
 
         /* Schedule next task */
@@ -683,7 +733,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
         sched_state_task_picked();
 
-        raw_spin_unlock(&global_lock);
+        raw_spin_unlock(&entry->lock);
+        local_irq_restore(flags);
 
         if (next)
                 TRACE_TASK(next, "Scheduled at %llu", litmus_clock());
@@ -699,7 +750,6 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
 static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
         .plugin_name = "MC",
-        .tick = mc_tick,
         .task_new = mc_task_new,
         .complete_job = complete_job,
         .task_exit = mc_task_exit,
@@ -713,13 +763,19 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
  * and destroyed when the module is unloaded.
  */
 DEFINE_PER_CPU(cpu_entry_t, _mc_cpus);
+/* LVL-A */
 DEFINE_PER_CPU(domain_data_t, _mc_crit_a);
+DEFINE_PER_CPU(rt_domain_t, _mc_crit_a_rt);
+/* LVL-B */
 DEFINE_PER_CPU(domain_data_t, _mc_crit_b);
-static domain_data_t _mc_crit_c, _mc_crit_d;
-struct bheap _mc_heap_c, _mc_heap_d;
-struct bheap_node _mc_nodes_c[NR_CPUS], _mc_nodes_d[NR_CPUS];
-
-static void init_crit_entry(crit_cpu_entry_t *ce, enum crit_level level,
+DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt);
+/* LVL-C */
+static domain_data_t _mc_crit_c;
+static rt_domain_t _mc_crit_c_rt;
+struct bheap _mc_heap_c;
+struct bheap_node _mc_nodes_c[NR_CPUS];
+
+static void init_crit_entry(crit_entry_t *ce, enum crit_level level,
                             domain_data_t *dom_data,
                             struct bheap_node *node)
 {
@@ -727,6 +783,7 @@ static void init_crit_entry(crit_cpu_entry_t *ce, enum crit_level level,
         ce->linked = NULL;
         ce->node = node;
         ce->domain = &dom_data->domain;
+        ce->usable = 1;
 
         hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
         ce->timer.function = mc_ghost_exhausted;
@@ -745,7 +802,7 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level,
 {
         int cpu;
         cpu_entry_t *entry;
-        crit_cpu_entry_t *ce;
+        crit_entry_t *ce;
         struct bheap_node *node;
 
         dom_data->crit_entry = NULL;
@@ -764,9 +821,9 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level,
         }
 }
 
-static inline void init_edf_domain(domain_t *dom)
+static inline void init_edf_domain(domain_t *dom, rt_domain_t *rt)
 {
-        pd_domain_init(dom, edf_ready_order, NULL,
+        pd_domain_init(dom, rt, edf_ready_order, NULL,
                        mc_release_jobs, mc_preempt_needed,
                        edf_higher_prio);
 }
@@ -775,7 +832,9 @@ static int __init init_mc(void)
 {
         int cpu;
         cpu_entry_t *entry;
+        rt_domain_t *rt;
         domain_data_t *dom_data;
+        raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */
 
         raw_spin_lock_init(&global_lock);
 
@@ -786,32 +845,35 @@ static int __init init_mc(void)
                 entry->cpu = cpu;
                 entry->scheduled = NULL;
                 entry->linked = NULL;
+                raw_spin_lock_init(&entry->lock);
 
                 /* CRIT_LEVEL_A */
                 dom_data = &per_cpu(_mc_crit_a, cpu);
+                rt = &per_cpu(_mc_crit_a_rt, cpu);
                 init_local_domain(entry, dom_data, CRIT_LEVEL_A);
-                init_edf_domain(&dom_data->domain);
+                init_edf_domain(&dom_data->domain, rt);
+                a_dom = dom_data->domain.lock;
+                raw_spin_lock_init(a_dom);
                 dom_data->domain.name = "LVL-A";
 
                 /* CRIT_LEVEL_B */
                 dom_data = &per_cpu(_mc_crit_b, cpu);
+                rt = &per_cpu(_mc_crit_b_rt, cpu);
                 init_local_domain(entry, dom_data, CRIT_LEVEL_B);
-                init_edf_domain(&dom_data->domain);
+                init_edf_domain(&dom_data->domain, rt);
+                b_dom = dom_data->domain.lock;
+                raw_spin_lock_init(b_dom);
                 dom_data->domain.name = "LVL-B";
         }
 
         /* CRIT_LEVEL_C */
         init_global_domain(&_mc_crit_c, CRIT_LEVEL_C,
                            &_mc_heap_c, _mc_nodes_c);
-        init_edf_domain(&_mc_crit_c.domain);
+        init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt);
+        c_dom = _mc_crit_c.domain.lock;
+        raw_spin_lock_init(c_dom);
         _mc_crit_c.domain.name = "LVL-C";
 
-        /* CRIT_LEVEL_D */
-        init_global_domain(&_mc_crit_d, CRIT_LEVEL_D,
-                           &_mc_heap_d, _mc_nodes_d);
-        init_edf_domain(&_mc_crit_d.domain);
-        _mc_crit_d.domain.name = "LVL-D";
-
         return register_sched_plugin(&mc_plugin);
 }
 
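Because the domain lock must be taken before a CPU's entry lock, the rewritten mc_schedule() and update_crit_levels() paths above temporarily drop the entry lock, acquire the domain lock, re-acquire the entry lock, and then re-check any state read before the drop. The sketch below is only a minimal user-space illustration of that unlock/relock pattern under those assumptions; the names are hypothetical and pthread mutexes stand in for the kernel's raw spinlocks.

/* Illustrative sketch of the "drop inner, take outer, re-take inner,
 * re-validate" pattern used to respect the domain-before-entry order.
 * Hypothetical names; pthread mutexes model raw spinlocks. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct state {
        pthread_mutex_t entry_lock;   /* inner per-CPU lock, already held */
        pthread_mutex_t domain_lock;  /* outer domain lock, wanted next */
        int linked;                   /* state that may change while unlocked */
};

/* Called with entry_lock held; returns with both locks held. */
static bool lock_domain_then_entry(struct state *s)
{
        int linked_before = s->linked;

        pthread_mutex_unlock(&s->entry_lock);   /* drop the inner lock */
        pthread_mutex_lock(&s->domain_lock);    /* take the outer lock first */
        pthread_mutex_lock(&s->entry_lock);     /* re-take the inner lock */

        /* Anything read before the drop must be re-validated, which is
         * why the patch re-checks fields such as entry->linked. */
        return s->linked == linked_before;
}

int main(void)
{
        struct state s = {
                .entry_lock  = PTHREAD_MUTEX_INITIALIZER,
                .domain_lock = PTHREAD_MUTEX_INITIALIZER,
                .linked      = 0,
        };

        pthread_mutex_lock(&s.entry_lock);
        printf("state unchanged across relock: %d\n",
               lock_domain_then_entry(&s));
        pthread_mutex_unlock(&s.entry_lock);
        pthread_mutex_unlock(&s.domain_lock);
        return 0;
}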
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index 1fdca7a7d3aa..0a5a18dc54ff 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -61,6 +61,7 @@ struct ce_dom_data {
 };
 
 DEFINE_PER_CPU(domain_t, mc_ce_doms);
+DEFINE_PER_CPU(rt_domain_t, mc_ce_rts);
 DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
 
 /*
@@ -511,12 +512,14 @@ static int __init init_sched_mc_ce(void)
 {
         struct ce_dom_data *ce_data;
         domain_t *dom;
+        rt_domain_t *rt;
         int cpu, err;
 
         clear_pid_entries();
         for_each_online_cpu(cpu) {
                 dom = &per_cpu(mc_ce_doms, cpu);
-                pd_domain_init(dom, NULL, NULL, NULL, NULL, NULL);
+                rt = &per_cpu(mc_ce_rts, cpu);
+                pd_domain_init(dom, rt, NULL, NULL, NULL, NULL, NULL);
                 dom->data = &per_cpu(_mc_ce_dom_data, cpu);
                 ce_data = dom->data;
                 hrtimer_init(&ce_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);