author    | Mac Mollison <mollison@cs.unc.edu>    | 2010-11-06 14:20:43 -0400
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-08-25 14:41:19 -0400
commit    | 0c0aa8bc8596433efdad240166d5b98773ab65e6 (patch)
tree      | dfb449546fb1050c0726bcc1f520595f4a32917f /litmus/sched_mc.c
parent    | 0f35df9f3143ef5fe48da7c3c02ab7e93f15d9d3 (diff)
Added many comments; some minor style changes
Diffstat (limited to 'litmus/sched_mc.c')
-rw-r--r--  litmus/sched_mc.c | 132
1 file changed, 94 insertions, 38 deletions
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 0a8464b5aa5c..e2b9fffd72f4 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -26,10 +26,6 @@
 
 /* Overview of MC operations.
  *
- * For a detailed explanation of MC have a look at the FMLP paper. This
- * description only covers how the individual operations are implemented in
- * LITMUS.
- *
  * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
  *                            structure (NOT the actually scheduled
  *                            task). If there is another linked task To
@@ -118,18 +114,22 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 static struct bheap_node mc_heap_node[NR_CPUS];
 static struct bheap mc_cpu_heap;
 
-/*static rt_domain_t mc;*/
-DEFINE_PER_CPU(rt_domain_t, crit_b);
-#define remote_b_queue(cpu) (&per_cpu(crit_b, cpu))
-#define local_b_queue (&__get_cpu_var(crit_b))
-
+/* Create per-CPU domains for criticality A */
 DEFINE_PER_CPU(rt_domain_t, crit_a);
 #define remote_a_queue(cpu) (&per_cpu(crit_a, cpu))
 #define local_a_queue (&__get_cpu_var(crit_a))
 
+/* Create per-CPU domains for criticality B */
+DEFINE_PER_CPU(rt_domain_t, crit_b);
+#define remote_b_queue(cpu) (&per_cpu(crit_b, cpu))
+#define local_b_queue (&__get_cpu_var(crit_b))
+
+/* Create global domains for criticalities C and D */
 static rt_domain_t crit_c;
 static rt_domain_t crit_d;
-#define crit_c_lock (crit_c.ready_lock)
+
+/* We use crit_c.ready_lock as a global lock */
+#define global_lock (crit_c.ready_lock)
 
 /* BEGIN clone of edf_common.c to allow shared C/D run queue*/
 
@@ -188,7 +188,8 @@ static void mc_edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 
 /* END clone of edf_common.c*/
 
-static rt_domain_t* proper_domain(struct task_struct* task)
+/* Return the domain of a task */
+static rt_domain_t* domain_of(struct task_struct* task)
 {
 	switch (task->rt_param.task_params.crit)
 	{
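The two hunks above organize the scheduling domains: criticalities A and B each get a per-CPU rt_domain_t (partitioned queues), C and D share global domains, crit_c.ready_lock doubles as the single global lock, and domain_of() dispatches a task to its queue based on its criticality level. The following standalone C sketch models that lookup; the toy types and names are illustrative only, not the LITMUS^RT API.

```c
/* Toy userspace model of the domain layout above: criticalities A and B get
 * one domain per CPU, C and D share global domains.  All names here are
 * illustrative, not the LITMUS^RT API. */
#include <stdio.h>

#define NCPUS 4

enum crit_level { CRIT_A, CRIT_B, CRIT_C, CRIT_D };

struct toy_domain { char name[16]; };

static struct toy_domain crit_a_dom[NCPUS];          /* per-CPU, like crit_a */
static struct toy_domain crit_b_dom[NCPUS];          /* per-CPU, like crit_b */
static struct toy_domain crit_c_dom = { "crit_c" };  /* global               */
static struct toy_domain crit_d_dom = { "crit_d" };  /* global               */

/* Analogous to domain_of(): criticality (and, for A/B, the CPU) picks the queue. */
static struct toy_domain *domain_for(enum crit_level crit, int cpu)
{
	switch (crit) {
	case CRIT_A: return &crit_a_dom[cpu];
	case CRIT_B: return &crit_b_dom[cpu];
	case CRIT_C: return &crit_c_dom;
	default:     return &crit_d_dom;
	}
}

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		snprintf(crit_a_dom[cpu].name, sizeof crit_a_dom[cpu].name,
			 "crit_a[%d]", cpu);
		snprintf(crit_b_dom[cpu].name, sizeof crit_b_dom[cpu].name,
			 "crit_b[%d]", cpu);
	}
	printf("B-level task on cpu 2 -> %s\n", domain_for(CRIT_B, 2)->name);
	printf("C-level task on cpu 2 -> %s\n", domain_for(CRIT_C, 2)->name);
	return 0;
}
```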
@@ -215,6 +216,9 @@ static rt_domain_t* proper_domain(struct task_struct* task)
 #define WANT_ALL_SCHED_EVENTS
  */
 
+/* Called by update_cpu_position and lowest_prio_cpu in bheap operations
+ * Callers always have global lock
+ */
 static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
@@ -227,7 +231,10 @@ static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 }
 
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
- *                       order in the cpu queue. Caller must hold mc lock.
+ *                       order in the cpu queue. Caller must hold global lock.
+ * Called from link_task_to_cpu, which holds global lock
+ * link_task_to_cpu is the only way a CPU can get a new task, and hence have its
+ * priority change.
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
@@ -236,7 +243,10 @@ static void update_cpu_position(cpu_entry_t *entry)
 	bheap_insert(cpu_lower_prio, &mc_cpu_heap, entry->hn);
 }
 
-/* caller must hold mc lock */
+/* caller must hold global lock
+ * Only called when checking for gedf preemptions by check_for_gedf_preemptions,
+ * which always has global lock
+ */
 static cpu_entry_t* lowest_prio_cpu(void)
 {
 	struct bheap_node* hn;
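cpu_lower_prio() and lowest_prio_cpu() keep the CPU entries ordered by the priority of each CPU's linked job, so the scheduler can always find the CPU that is cheapest to preempt. Below is a hedged userspace sketch of the same idea: a linear scan stands in for LITMUS's bheap, and a deadline-only priority rule with "idle beats everything" is assumed for illustration.

```c
/* Userspace sketch of the idea behind cpu_lower_prio()/lowest_prio_cpu(): keep
 * the CPUs ordered by the priority of their linked job and always preempt the
 * lowest-priority (or idle) one.  A linear scan replaces the bheap, and a
 * deadline-only priority rule is assumed for illustration. */
#include <stdio.h>

struct toy_cpu {
	int cpu;
	long long linked_deadline;   /* 0 means no linked job (idle CPU) */
};

/* Nonzero if a has lower priority than b, i.e. a is the better preemption target. */
static int toy_cpu_lower_prio(const struct toy_cpu *a, const struct toy_cpu *b)
{
	if (!a->linked_deadline)
		return 1;                 /* idle CPU always loses the comparison */
	if (!b->linked_deadline)
		return 0;
	return a->linked_deadline > b->linked_deadline;   /* later deadline loses */
}

static const struct toy_cpu *toy_lowest_prio_cpu(const struct toy_cpu *cpus, int n)
{
	const struct toy_cpu *worst = &cpus[0];
	for (int i = 1; i < n; i++)
		if (toy_cpu_lower_prio(&cpus[i], worst))
			worst = &cpus[i];
	return worst;
}

int main(void)
{
	struct toy_cpu cpus[] = { { 0, 40 }, { 1, 0 }, { 2, 25 }, { 3, 90 } };
	printf("preempt cpu %d\n", toy_lowest_prio_cpu(cpus, 4)->cpu);   /* cpu 1 */
	return 0;
}
```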
@@ -248,6 +258,8 @@ static cpu_entry_t* lowest_prio_cpu(void)
 /* link_task_to_cpu - Update the link of a CPU.
  *                    Handles the case where the to-be-linked task is already
  *                    scheduled on a different CPU.
+ * Called from unlink(), prepare_preemption(), and mc_schedule()
+ * Callers hold global lock
  */
 static noinline void link_task_to_cpu(struct task_struct* linked,
 				      cpu_entry_t *entry)
@@ -313,8 +325,10 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	update_cpu_position(entry);
 }
 
-/* unlink - Make sure a task is not linked any longer to an entry
- *          where it was linked before. Must hold appropriate lock
+/* unlink - Make sure a task is not linked any longer to a cpu entry
+ *          where it was linked before.
+ * Called by schedule, task_block, task_exit, and job_completion
+ * Caller assumed to hold global lock
  */
 static noinline void unlink(struct task_struct* t)
 {
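link_task_to_cpu() and unlink() maintain the task/CPU linkage that the comments above document: linking a job to a CPU displaces the previously linked job (which must then be requeued), and unlinking clears the back-pointer so the job can go back into its domain. The simplified model below shows only that bookkeeping; it deliberately omits the "linked task is still scheduled on another CPU" swap that the real function handles, and all names are illustrative.

```c
/* Simplified model of the link/unlink bookkeeping: linking displaces whatever
 * was linked before, unlinking clears the job's back-pointer.  The real
 * link_task_to_cpu() also handles the already-scheduled-elsewhere swap, which
 * this sketch omits.  Field and function names are illustrative. */
#include <stddef.h>
#include <stdio.h>

#define NO_CPU (-1)

struct toy_job { int pid; int linked_on; };
struct toy_cpu { int cpu; struct toy_job *linked; };

static void toy_link(struct toy_job *j, struct toy_cpu *c)
{
	if (c->linked)
		c->linked->linked_on = NO_CPU;   /* displaced job must be requeued */
	c->linked = j;
	if (j) {
		j->linked_on = c->cpu;
		printf("job %d linked to cpu %d\n", j->pid, c->cpu);
	}
	/* real code: update_cpu_position(c) to fix the heap ordering */
}

static void toy_unlink(struct toy_job *j, struct toy_cpu *cpus)
{
	if (j->linked_on != NO_CPU) {
		toy_link(NULL, &cpus[j->linked_on]);
		j->linked_on = NO_CPU;
	}
}

int main(void)
{
	struct toy_cpu cpus[2] = { { 0, NULL }, { 1, NULL } };
	struct toy_job a = { 11, NO_CPU }, b = { 22, NO_CPU };

	toy_link(&a, &cpus[0]);
	toy_link(&b, &cpus[0]);   /* displaces job 11 */
	toy_unlink(&b, cpus);
	printf("cpu 0 now linked to %s\n", cpus[0].linked ? "a job" : "nothing");
	return 0;
}
```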
@@ -339,12 +353,13 @@ static noinline void unlink(struct task_struct* t)
 		 * case.
 		 */
 		TRACE("Weird is_queued situation happened\n");
-		remove(proper_domain(t), t);
+		remove(domain_of(t), t);
 	}
 }
 
 
 /* preempt - force a CPU to reschedule
+ * Just sets a Linux scheduler flag.
  */
 static void preempt(cpu_entry_t *entry)
 {
@@ -352,7 +367,8 @@ static void preempt(cpu_entry_t *entry)
 }
 
 /* requeue - Put an unlinked task into the proper domain.
- *           Caller must hold proper lock.
+ *           Caller holds global lock.
+ * Called by mc_job_arrival() and prepare_preemption().
  */
 static noinline void requeue(struct task_struct* task)
 {
@@ -361,17 +377,17 @@ static noinline void requeue(struct task_struct* task)
 	BUG_ON(is_queued(task));
 
 	if (is_released(task, litmus_clock()))
-		__add_ready(proper_domain(task), task);
+		__add_ready(domain_of(task), task);
 	else {
 		/* it has got to wait */
-		add_release(proper_domain(task), task);
+		add_release(domain_of(task), task);
 	}
 }
 
-static void do_preemption_on_cpu(rt_domain_t *dom, cpu_entry_t *cpu) {
+static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
 	struct task_struct* task;
 	task = __take_ready(dom);
-	TRACE("do_preemption_on_cpu: attempting to link task %d to %d\n",
+	TRACE("prepare_preemption: attempting to link task %d to %d\n",
 		task->pid, cpu->cpu);
 	if (cpu->linked)
 		requeue(cpu->linked);
@@ -379,22 +395,27 @@ static void do_preemption_on_cpu(rt_domain_t *dom, cpu_entry_t *cpu) {
 	preempt(cpu);
 }
 
+/* Callers always have global lock */
 static void check_for_gedf_preemptions(rt_domain_t *dom){
 	cpu_entry_t* last;
 	for (last = lowest_prio_cpu();
 	     mc_edf_preemption_needed(dom, last->linked);
 	     last = lowest_prio_cpu()) {
-		do_preemption_on_cpu(dom, last);
+		prepare_preemption(dom, last);
 	}
 }
 
 static void check_for_pedf_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
 	if (mc_edf_preemption_needed(dom, cpu->linked)) {
-		do_preemption_on_cpu(dom, cpu);
+		prepare_preemption(dom, cpu);
 	}
 }
 
-/* mc_job_arrival: task is either resumed or released */
+/* mc_job_arrival: task is either resumed or released
+ * Called from job_completion(), mc_task_new(), and mc_task_wake_up(), all
+ * of which have the global lock
+ * Requeues task and checks for/triggers preemptions
+ */
 static noinline void mc_job_arrival(struct task_struct* task)
 {
 	BUG_ON(!task);
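prepare_preemption() pulls the highest-priority ready job off a domain, requeues whatever was linked to the target CPU, links the new job, and flags the CPU to reschedule; check_for_gedf_preemptions() repeats this against the lowest-priority CPU until no further preemption is warranted. A minimal sketch of that loop follows, assuming a deadline-only priority test and toy helpers in place of the LITMUS ones.

```c
/* Minimal sketch of the G-EDF preemption loop: while the highest-priority
 * queued job beats the job on the most-preemptable CPU, prepare a preemption
 * on that CPU.  Helper names and the priority rule are illustrative
 * assumptions, not the LITMUS^RT API. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_job { int pid; long long deadline; };
struct toy_cpu { int cpu; struct toy_job *linked; bool needs_resched; };

static struct toy_job j1 = { 101, 30 }, j2 = { 102, 55 };
static struct toy_job *ready_head = &j1;        /* pretend queue of one job */
static struct toy_cpu cpus[2] = { { 0, &j2, false }, { 1, NULL, false } };

static struct toy_cpu *toy_lowest_prio_cpu(void)
{
	/* Idle CPU is always the cheapest target; otherwise latest deadline. */
	struct toy_cpu *worst = &cpus[0];
	for (int i = 1; i < 2; i++) {
		if (!cpus[i].linked ||
		    (worst->linked && cpus[i].linked->deadline > worst->linked->deadline))
			worst = &cpus[i];
	}
	return worst;
}

static bool preemption_needed(const struct toy_cpu *c)
{
	if (!ready_head)
		return false;
	return !c->linked || ready_head->deadline < c->linked->deadline;
}

static void toy_prepare_preemption(struct toy_cpu *c)
{
	printf("link job %d to cpu %d\n", ready_head->pid, c->cpu);
	c->linked = ready_head;
	ready_head = NULL;          /* queue is now empty in this toy model */
	c->needs_resched = true;    /* analogous to preempt(entry)          */
}

int main(void)
{
	struct toy_cpu *last;
	for (last = toy_lowest_prio_cpu(); preemption_needed(last);
	     last = toy_lowest_prio_cpu())
		toy_prepare_preemption(last);
	return 0;
}
```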
@@ -417,12 +438,16 @@ static noinline void mc_job_arrival(struct task_struct* task)
 	}
 }
 
+/* Called by the domain
+ * Obtains global lock, merges ready tasks, checks for/triggers preemptions,
+ * and releases global lock
+ */
 static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 	int i;
 
-	raw_spin_lock_irqsave(&crit_c_lock, flags);
+	raw_spin_lock_irqsave(&global_lock, flags);
 	TRACE("mc_release_jobs triggered\n");
 
 	__merge_ready(rt, tasks);
@@ -439,10 +464,13 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 		check_for_gedf_preemptions(rt);
 	}
 
-	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
+	raw_spin_unlock_irqrestore(&global_lock, flags);
 }
 
-/* caller holds crit_c_lock */
+/* caller holds global_lock
+ * Called only by mc_schedule() which holds global lock
+ * Prepares task for next period, unlinks it, and calls mc_job_arrival
+ */
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
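mc_release_jobs() runs entirely under the single global lock: acquire it with interrupts disabled, merge the newly released jobs into the ready queues, check for preemptions, and release it. A userspace sketch of that critical-section shape, with pthread_mutex_t standing in for the kernel's raw spinlock and IRQ masking, and all names illustrative:

```c
/* Userspace sketch of the mc_release_jobs() critical section: grab the single
 * global lock, merge released jobs into the ready queue, check for
 * preemptions, then drop the lock.  pthread_mutex_t stands in for the raw
 * spinlock plus IRQ disabling. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t toy_global_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_ready;                  /* toy "ready queue" is just a counter */

static void toy_check_for_preemptions(void)
{
	printf("checking %d ready jobs for preemptions\n", nr_ready);
}

static void toy_release_jobs(int newly_released)
{
	pthread_mutex_lock(&toy_global_lock);     /* raw_spin_lock_irqsave()      */
	nr_ready += newly_released;               /* __merge_ready()              */
	toy_check_for_preemptions();              /* check_for_*_preemption(s)    */
	pthread_mutex_unlock(&toy_global_lock);   /* raw_spin_unlock_irqrestore() */
}

int main(void)
{
	toy_release_jobs(3);
	return 0;
}
```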
@@ -470,6 +498,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
  *
  * checks whether the current task has expired and checks
  * whether we need to preempt it if it has not expired
+ * Called from LITMUS core
+ * Does not use locks
+ * Just sets reschedule flags on task and CPU and request_exit_np flag on task
  */
 static void mc_tick(struct task_struct* t)
 {
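As the new comment says, mc_tick() takes no locks; on a budget overrun it only raises flags: a reschedule request for a preemptable job, or an exit-non-preemptive-section request otherwise. A small sketch of that decision, with simplified field and flag names assumed for illustration:

```c
/* Sketch of the tick-time decision: if the running job has exhausted its
 * budget and is preemptable, raise a reschedule flag; if it is inside a
 * non-preemptive section, ask it to leave that section instead.  The budget
 * test and flag names are simplified assumptions, not the LITMUS^RT fields. */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	int pid;
	long long exec_time, exec_cost;   /* consumed vs. budgeted time  */
	bool np;                          /* in a non-preemptive section */
	bool need_resched, request_exit_np;
};

static void toy_tick(struct toy_task *t)
{
	bool budget_exhausted = t->exec_time >= t->exec_cost;

	if (!budget_exhausted)
		return;
	if (t->np)
		t->request_exit_np = true;   /* ask it to leave the NP section */
	else
		t->need_resched = true;      /* plain reschedule flag          */
}

int main(void)
{
	struct toy_task t = { 7, 10, 10, false, false, false };
	toy_tick(&t);
	printf("task %d need_resched=%d\n", t.pid, t.need_resched);
	return 0;
}
```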
@@ -512,6 +543,14 @@ static void mc_tick(struct task_struct* t)
  * sys_exit_np must be requested
  *
  * Any of these can occur together.
+ *
+ *
+ * Called by LITMUS core
+ * No lock required by caller
+ * Obtains global lock
+ * can call unlink(), request_exit_np(), job_completion(), __take_ready()
+ * modifies next, scheduled->scheduled_on, linked->scheduled_on
+ * Releases global lock
  */
 static struct task_struct* mc_schedule(struct task_struct * prev)
 {
@@ -528,7 +567,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		return NULL;
 #endif
 
-	raw_spin_lock(&crit_c_lock);
+	raw_spin_lock(&global_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -622,10 +661,10 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		next = prev;
 
 	/*TODO: Update name of locking, reflect that we're locking all queues*/
-	raw_spin_unlock(&crit_c_lock);
+	raw_spin_unlock(&global_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
-	TRACE("crit_c_lock released, next=0x%p\n", next);
+	TRACE("global_lock released, next=0x%p\n", next);
 
 	if (next)
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
@@ -639,6 +678,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
 
 /* _finish_switch - we just finished the switch away from prev
+ * Called by LITMUS core
+ * No locks
  */
 static void mc_finish_switch(struct task_struct *prev)
 {
@@ -652,6 +693,9 @@ static void mc_finish_switch(struct task_struct *prev)
 
 
 /* Prepare a task for running in RT mode
+ * Called by LITMUS core
+ * No lock required by caller
+ * Obtains lock and calls mc_job_arrival before releasing lock
  */
 static void mc_task_new(struct task_struct * t, int on_rq, int running)
 {
@@ -660,7 +704,7 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("mixed crit: task new %d\n", t->pid);
 
-	raw_spin_lock_irqsave(&crit_c_lock, flags);
+	raw_spin_lock_irqsave(&global_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -687,9 +731,13 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	mc_job_arrival(t);
-	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
+	raw_spin_unlock_irqrestore(&global_lock, flags);
 }
 
+/* Called by LITMUS core
+ * No lock required by caller
+ * Obtains lock and calls mc_job_arrival before releasing lock
+ */
 static void mc_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
@@ -697,7 +745,7 @@ static void mc_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	raw_spin_lock_irqsave(&crit_c_lock, flags);
+	raw_spin_lock_irqsave(&global_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -720,9 +768,13 @@ static void mc_task_wake_up(struct task_struct *task)
 		}
 	}
 	mc_job_arrival(task);
-	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
+	raw_spin_unlock_irqrestore(&global_lock, flags);
 }
 
+/* Called by LITMUS core
+ * No lock required by caller
+ * Obtains and releases global lock
+ */
 static void mc_task_block(struct task_struct *t)
 {
 	unsigned long flags;
@@ -730,26 +782,30 @@ static void mc_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&crit_c_lock, flags);
+	raw_spin_lock_irqsave(&global_lock, flags);
 	unlink(t);
-	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
+	raw_spin_unlock_irqrestore(&global_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
 
 
+/* Called by LITMUS core
+ * No lock required by caller
+ * Obtains and releases global lock
+ */
 static void mc_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&crit_c_lock, flags);
+	raw_spin_lock_irqsave(&global_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		mc_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
+	raw_spin_unlock_irqrestore(&global_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
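The task-lifecycle callbacks (mc_task_new(), mc_task_wake_up(), mc_task_block(), mc_task_exit()) all follow the discipline the new comments describe: take the global lock with IRQs off, update the linkage state, and release it. Below is a toy version of the exit path shown in the final hunk, with pthread_mutex_t again standing in for the raw spinlock and the structures being simplified stand-ins for the LITMUS ones.

```c
/* Sketch of the task-exit teardown: under the global lock, unlink the task and
 * clear the per-CPU "scheduled" slot that still points at it.  All names and
 * types here are illustrative. */
#include <pthread.h>
#include <stdio.h>

#define NO_CPU (-1)

struct toy_task { int pid; int scheduled_on; };
struct toy_cpu  { struct toy_task *scheduled; };

static pthread_mutex_t toy_global_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_cpu cpus[4];

static void toy_task_exit(struct toy_task *t)
{
	pthread_mutex_lock(&toy_global_lock);
	/* unlink(t) would go here in the real plugin */
	if (t->scheduled_on != NO_CPU) {
		cpus[t->scheduled_on].scheduled = NULL;
		t->scheduled_on = NO_CPU;
	}
	pthread_mutex_unlock(&toy_global_lock);
	printf("task %d: RIP\n", t->pid);
}

int main(void)
{
	struct toy_task t = { 42, 2 };
	cpus[2].scheduled = &t;
	toy_task_exit(&t);
	return 0;
}
```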