author    Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-21 18:48:49 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-21 18:48:49 -0400
commit    06051baff7db5e0c1b80d7b2a873b022191cdcec
tree      8f0a776714f91092f312bde90df9757291382d02 /litmus/sched_mc.c
parent    96b86e34ab2d7d49e3e6174935c0aca2828415f5
Formatting
Diffstat (limited to 'litmus/sched_mc.c')
 litmus/sched_mc.c | 100
 1 file changed, 35 insertions(+), 65 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 1f2b13c6a219..0ec03b3949d2 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -4,10 +4,6 @@
  * Implementation of the Mixed Criticality scheduling algorithm.
  *
  * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
- *
- * This version uses the simple approach and serializes all scheduling
- * decisions by the use of a queue lock. This is probably not the
- * best way to do it, but it should suffice for now.
  */
 
 #include <linux/spinlock.h>
@@ -75,7 +71,6 @@ typedef struct {
 } domain_data_t;
 
 static cpu_entry_t* cpus[NR_CPUS];
-static raw_spinlock_t global_lock;
 
 #define domain_data(dom) (container_of(dom, domain_data_t, domain))
 #define is_global(dom) (domain_data(dom)->heap)
@@ -86,8 +81,7 @@ static raw_spinlock_t global_lock;
 	(((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
 #define crit_cpu(ce) \
 	(container_of((void*)((ce) - (ce)->level), cpu_entry_t, crit_entries))
-
-/* useful debug macros */
+/* Useful debug macros */
 #define TS "(%s/%d:%d:%s)"
 #define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", (t) ? t->pid : 1, \
 	(t) ? t->rt_param.job_params.job_no : 1, \
@@ -115,7 +109,6 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 	second = b->value;
 	first_link = first->linked;
 	second_link = second->linked;
-
 	if (!first->usable || !second->usable) {
 		return second->usable && first->usable;
 	} else if (!first_link || !second_link) {
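For readers unfamiliar with bheap comparators: cpu_lower_prio() orders CPU entries so that bheap_peek() surfaces the best preemption target. A simplified userspace model of that ordering follows; the struct and field names are hypothetical and the tie-breaking is deliberately cruder than the kernel's.

```c
/* Illustrative "lower priority" comparator: unusable entries sort
 * below everything (so the heap never offers them as targets), and
 * among usable entries a later linked deadline means lower priority.
 */
struct cpu_model {
	int usable;
	long linked_deadline; /* large value when nothing is linked */
};

static int cpu_model_lower_prio(const struct cpu_model *a,
                                const struct cpu_model *b)
{
	if (!a->usable || !b->usable)
		return !a->usable && b->usable; /* unusable sorts lowest */
	return a->linked_deadline > b->linked_deadline;
}
```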
@@ -134,7 +127,6 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
 {
 	struct task_struct *next = dom->peek_ready(dom);
-
 	if (!next || !curr) {
 		return next && !curr;
 	} else {
@@ -150,8 +142,7 @@ static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
 static inline crit_entry_t* lowest_prio_cpu(domain_t *dom)
 {
 	struct bheap *heap = domain_data(dom)->heap;
-	struct bheap_node* hn;
-	hn = bheap_peek(cpu_lower_prio, heap);
+	struct bheap_node* hn = bheap_peek(cpu_lower_prio, heap);
 	return (hn) ? hn->value : NULL;
 }
 
@@ -161,11 +152,9 @@ static inline crit_entry_t* lowest_prio_cpu(domain_t *dom)
  */
 static void update_ghost_time(struct task_struct *p)
 {
-	u64 delta, clock;
-
+	u64 clock = litmus_clock();
+	u64 delta = clock - p->se.exec_start;
 	BUG_ON(!is_ghost(p));
-	clock = litmus_clock();
-	delta = clock - p->se.exec_start;
 	if (unlikely ((s64)delta < 0)) {
 		delta = 0;
 		TRACE_TASK(p, "WARNING: negative time delta");
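The pattern in update_ghost_time() — charge the time elapsed since exec_start and clamp deltas that go negative under cross-CPU clock drift — in a minimal userspace sketch; the type and function names here are illustrative, not from sched_mc.c.

```c
#include <stdint.h>

struct ghost_account {
	uint64_t exec_start; /* when this execution window began */
	uint64_t budget;     /* remaining ghost budget, in ns */
};

static void charge_ghost_time(struct ghost_account *g, uint64_t now)
{
	uint64_t delta = now - g->exec_start;

	if ((int64_t)delta < 0) /* clock ran "backwards": charge nothing */
		delta = 0;
	g->budget -= (delta < g->budget) ? delta : g->budget;
	g->exec_start = now;    /* start the next accounting window */
}
```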
@@ -236,7 +225,6 @@ static void link_task_to_crit(crit_entry_t *ce,
 }
 
 static void check_for_preempt(domain_t*);
-
 /**
  * job_arrival() - Called when a task re-enters the system.
  * Caller must hold no locks.
@@ -247,7 +235,6 @@ static void job_arrival(struct task_struct *task)
 
 	TRACE_TASK(task, "Job arriving");
 	BUG_ON(!task);
-
 	raw_spin_lock(dom->lock);
 	if (can_requeue(task)) {
 		dom->requeue(dom, task);
@@ -271,16 +258,16 @@
  */
 static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task)
 {
-	int i;
+	int i = entry_level(entry);
 	TRACE_TASK(task, "Linking to P%d", entry->cpu);
 	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
 	BUG_ON(task && is_ghost(task));
 
-	i = entry_level(entry);
 	if (task){
 		set_rt_flags(task, RT_F_RUNNING);
 	}
 	entry->linked = task;
+	/* Higher criticality crit entries are now usable */
 	for (; i < entry_level(entry) + 1; i++) {
 		TRACE_CRIT_ENTRY(&entry->crit_entries[i], "now usable");
 		entry->crit_entries[i].usable = 1;
@@ -295,9 +282,8 @@ static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task)
 * Caller must hold the lock for @dom and @ce's CPU lock. Returns 1 if
 * a physical preemption occurred.
 */
-static int preempt(domain_t *dom, crit_entry_t *ce)
+static void preempt(domain_t *dom, crit_entry_t *ce)
 {
-	int rv = 0;
 	struct task_struct *task = dom->take_ready(dom);
 	cpu_entry_t *entry = crit_cpu(ce);
 
@@ -309,14 +295,11 @@ static int preempt(domain_t *dom, crit_entry_t *ce)
 		dom->requeue(dom, ce->linked);
 	}
 	link_task_to_crit(ce, task);
-
 	/* Preempt actual execution if this is a running task */
 	if (!is_ghost(task)) {
 		link_task_to_cpu(entry, task);
 		preempt_if_preemptable(entry->scheduled, entry->cpu);
-		rv = 1;
 	}
-	return rv;
 }
 
 /**
@@ -335,7 +318,6 @@ static void update_crit_levels(cpu_entry_t *entry)
 	for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
 		ce = &entry->crit_entries[i];
 		tasks[i] = ce->linked;
-		TRACE_CRIT_ENTRY(ce, "not usable");
 		ce->usable = 0;
 		if (ce->linked) {
 			link_task_to_crit(ce, NULL);
@@ -380,7 +362,6 @@ static void check_for_preempt(domain_t *dom)
 	} else /* Partitioned */ {
 		ce = domain_data(dom)->crit_entry;
 		entry = crit_cpu(ce);
-
 		raw_spin_lock(&entry->lock);
 		if (ce->usable && dom->preempt_needed(dom, ce->linked)) {
 			preempt(dom, ce);
@@ -406,11 +387,12 @@ static void remove_from_all(struct task_struct* task)
 	BUG_ON(!task);
 
 	raw_spin_lock(dom->lock);
+
 	if (task->rt_param.linked_on != NO_CPU) {
 		entry = cpus[task->rt_param.linked_on];
-
 		raw_spin_lock(&entry->lock);
-		/* Unlink if task is still linked post lock */
+
+		/* Unlink only if task is still linked post lock */
 		ce = &entry->crit_entries[tsk_mc_crit(task)];
 		if (task->rt_param.linked_on != NO_CPU) {
 			BUG_ON(entry->linked != task);
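The reworded comment highlights the check-lock-recheck idiom: linked_on is read before the entry lock is taken, so it must be tested again once the lock is held, because another CPU may have unlinked the task in between. A minimal sketch of the idiom, with an illustrative slot type standing in for the kernel structures:

```c
#include <pthread.h>

struct slot {
	pthread_mutex_t lock;
	int linked; /* protected by lock */
};

static void unlink_if_still_linked(struct slot *s)
{
	if (s->linked) {                /* unlocked read: only a hint */
		pthread_mutex_lock(&s->lock);
		if (s->linked)          /* authoritative re-check */
			s->linked = 0;
		pthread_mutex_unlock(&s->lock);
	}
}
```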
@@ -420,13 +402,11 @@ static void remove_from_all(struct task_struct* task)
 			link_task_to_cpu(entry, NULL);
 		}
 	}
-	BUG_ON(is_queued(task));
 
-	if (update) {
+	if (update)
 		update_crit_levels(entry);
-	} else {
+	else
 		raw_spin_unlock(&entry->lock);
-	}
 	} else if (is_queued(task)) {
 	/* This is an interesting situation: t is scheduled,
 	 * but was just recently unlinked. It cannot be
@@ -437,6 +417,7 @@ static void remove_from_all(struct task_struct* task)
 	 */
 		remove((rt_domain_t*)get_task_domain(task)->data, task);
 	}
+	BUG_ON(is_queued(task));
 	raw_spin_unlock(dom->lock);
 }
 
@@ -468,9 +449,8 @@ static void job_completion(struct task_struct *task, int forced)
 	if (tsk_mc_data(task)->mc_job.ghost_budget == 0) {
 		tsk_mc_data(task)->mc_job.is_ghost = 0;
 		prepare_for_next_period(task);
-		if (is_released(task, litmus_clock())) {
+		if (is_released(task, litmus_clock()))
 			sched_trace_task_release(task);
-		}
 	}
 
 	/* Requeue non-blocking tasks */
@@ -484,18 +464,16 @@ static void job_completion(struct task_struct *task, int forced)
 static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 {
 	unsigned long flags;
-	crit_entry_t *ce;
+	crit_entry_t *ce = container_of(timer, crit_entry_t, timer);
 	struct task_struct *tmp = NULL;
 
 	local_irq_save(flags);
-
-	ce = container_of(timer, crit_entry_t, timer);
 	TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing");
 
 	/* Due to race conditions, we cannot just set the linked
 	 * task's budget to 0 as it may no longer be the task
 	 * for which this timer was armed. Instead, update the running
-	 * task time
+	 * task time and see if this causes exhaustion.
 	 */
 	raw_spin_lock(&crit_cpu(ce)->lock);
 	if (ce->linked && is_ghost(ce->linked)) {
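Hoisting the container_of() call into the initializer works because the callback's only argument is the embedded timer; the enclosing crit_entry_t is recovered by subtracting the member's offset. A self-contained sketch of that pattern, with my_timer and my_crit_entry as hypothetical stand-ins for hrtimer and crit_entry_t:

```c
#include <stddef.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_timer { long expires; };

struct my_crit_entry {
	int level;
	struct my_timer timer; /* embedded, like hrtimer in crit_entry_t */
};

/* Given a pointer to the embedded timer, recover its container. */
static struct my_crit_entry *entry_of(struct my_timer *t)
{
	return my_container_of(t, struct my_crit_entry, timer);
}
```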
@@ -524,11 +502,9 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	domain_t *dom = get_task_domain(first);
 
 	raw_spin_lock_irqsave(dom->lock, flags);
-
 	TRACE_TASK(first, "Jobs released");
 	__merge_ready(rt, tasks);
 	check_for_preempt(dom);
-
 	raw_spin_unlock_irqrestore(dom->lock, flags);
 }
 
@@ -538,21 +514,18 @@
  */
 static void mc_task_new(struct task_struct *t, int on_rq, int running)
 {
 	unsigned long flags;
 	cpu_entry_t* entry;
-	enum crit_level level;
-
-	TRACE("New mixed criticality task %d\n", t->pid);
+	enum crit_level level = tsk_mc_crit(t);
 
 	local_irq_save(flags);
+	TRACE("New mixed criticality task %d\n", t->pid);
 
 	/* Assign domain */
-	level = tsk_mc_crit(t);
-	if (level < CRIT_LEVEL_C) {
+	if (level < CRIT_LEVEL_C)
 		entry = cpus[get_partition(t)];
-	} else {
+	else
 		entry = cpus[task_cpu(t)];
-	}
 	t->rt_param._domain = entry->crit_entries[level].domain;
 
 	/* Setup job params */
@@ -579,12 +552,10 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 static void mc_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
-	lt_t now;
-
+	lt_t now = litmus_clock();
 	local_irq_save(flags);
-	TRACE_TASK(task, "Wakes up");
 
-	now = litmus_clock();
+	TRACE_TASK(task, "Wakes up");
 	if (is_tardy(task, now)) {
 		/* Task missed its last release */
 		release_at(task, now);
@@ -648,8 +619,8 @@ static long mc_admit_task(struct task_struct* task)
 	return 0;
 }
 
-/*
- * Return next task which should be scheduled.
+/**
+ * mc_schedule() - Return next task which should be scheduled.
 */
 static struct task_struct* mc_schedule(struct task_struct * prev)
 {
@@ -694,7 +665,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	if ((out_of_time || sleep) && !blocks && !preempt)
 		job_completion(entry->scheduled, !sleep);
 	/* Global scheduled tasks must wait for a deschedule before they
-	 * can rejoin a global domain. See comment in job_arrival.
+	 * can rejoin a global domain. Requeue them here.
 	 */
 	else if (global && preempt && !blocks)
 		job_arrival(entry->scheduled);
@@ -705,6 +676,9 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		ce = &entry->crit_entries[i];
 		dom = ce->domain;
 
+		/* Swap locks. We cannot acquire a domain lock while
+		 * holding an entry lock or deadlocks will happen.
+		 */
 		raw_spin_unlock(&entry->lock);
 		raw_spin_lock(dom->lock);
 		raw_spin_lock(&entry->lock);
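The new comment documents the lock order (domain lock before entry lock). A sketch of the swap it describes, with pthread mutexes standing in for the raw spinlocks:

```c
#include <pthread.h>

/* Drop the inner lock, take the outer lock, then re-take the inner
 * lock, so both are always acquired in the same order. Anything read
 * under the old critical section must be re-validated afterwards.
 */
static void relock_in_order(pthread_mutex_t *dom_lock,
                            pthread_mutex_t *entry_lock)
{
	/* precondition: entry_lock held, dom_lock not held */
	pthread_mutex_unlock(entry_lock);
	pthread_mutex_lock(dom_lock);   /* outer lock first... */
	pthread_mutex_lock(entry_lock); /* ...inner lock second */
}
```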
@@ -714,33 +688,33 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 			dom->take_ready(dom);
 			link_task_to_crit(ce, dtask);
 			ready_task = (is_ghost(dtask)) ? NULL : dtask;
+
+			/* Task found! */
 			if (ready_task) {
 				link_task_to_cpu(entry, ready_task);
 				raw_spin_unlock(dom->lock);
 				update_crit_levels(entry);
 				raw_spin_lock(&entry->lock);
-				continue;
+				goto picked;
 			}
 		}
 		raw_spin_unlock(dom->lock);
 	}
 
+picked:
 	/* Schedule next task */
 	next = entry->linked;
 	entry->scheduled = next;
 	if (entry->scheduled)
 		entry->scheduled->rt_param.scheduled_on = entry->cpu;
-
 	sched_state_task_picked();
 
 	raw_spin_unlock(&entry->lock);
 	local_irq_restore(flags);
-
 	if (next)
 		TRACE_TASK(next, "Scheduled at %llu", litmus_clock());
 	else if (exists && !next)
 		TRACE("Becomes idle at %llu\n", litmus_clock());
-
 	return next;
 }
 
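The continue-to-goto change ends the domain scan as soon as a task is linked, rather than iterating over crit entries that update_crit_levels() has presumably just marked unusable. An illustrative model of the control flow (not kernel code):

```c
/* Scan levels from highest to lowest criticality; jump out as soon
 * as one is ready, where a `continue` would keep scanning.
 */
static int pick_first_ready(const int ready[], int nlevels)
{
	int i, picked = -1;

	for (i = 0; i < nlevels; i++) {
		if (ready[i]) {
			picked = i; /* highest-criticality ready level */
			goto out;
		}
	}
out:
	return picked; /* -1 if no level had a ready task */
}
```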
@@ -784,7 +758,6 @@ static void init_crit_entry(crit_entry_t *ce, enum crit_level level,
 	ce->node = node;
 	ce->domain = &dom_data->domain;
 	ce->usable = 1;
-
 	hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ce->timer.function = mc_ghost_exhausted;
 }
@@ -813,9 +786,7 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level,
 		entry = cpus[cpu];
 		node = &nodes[cpu];
 		ce = &entry->crit_entries[level];
-
 		init_crit_entry(ce, level, dom_data, node);
-
 		bheap_node_init(&ce->node, ce);
 		bheap_insert(cpu_lower_prio, heap, node);
 	}
@@ -836,12 +807,11 @@ static int __init init_mc(void)
 	domain_data_t *dom_data;
 	raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */
 
-	raw_spin_lock_init(&global_lock);
-
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(_mc_cpus, cpu);
 		cpus[cpu] = entry;
 
+		/* CPU */
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
 		entry->linked = NULL;