Diffstat (limited to 'litmus/sched_gsn_edf.c')
 litmus/sched_gsn_edf.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)
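The conversion below appears to track the mainline 2.6.33 locking split: spinlock_t may become a sleeping lock under PREEMPT_RT, while raw_spinlock_t always busy-waits, so scheduler-internal locks such as gsnedf_lock have to move to the raw_ API. A minimal sketch of the resulting pattern, using a hypothetical demo_lock and demo_counter that are not part of this patch:

#include <linux/spinlock.h>

/* Hypothetical lock and shared state, for illustration only. */
static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_counter;

static void demo_update(void)
{
	unsigned long flags;

	/* Raw variant: never sleeps, safe in atomic/scheduler context. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}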
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6137c74729cb..c0c63eba70ce 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	__merge_ready(rt, tasks);
 	check_for_preemptions();
 
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 /* caller holds gsnedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (gsnedf.release_master == entry->cpu)
 		return NULL;
 
-	spin_lock(&gsnedf_lock);
+	raw_spin_lock(&gsnedf_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -471,7 +471,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&gsnedf_lock);
+	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -509,7 +509,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("gsn edf: task new %d\n", t->pid);
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -532,7 +532,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	gsnedf_job_arrival(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -542,7 +542,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -565,7 +565,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	gsnedf_job_arrival(task);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
@@ -575,9 +575,9 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -588,13 +588,13 @@ static void gsnedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -630,7 +630,7 @@ static void update_queue_position(struct task_struct *holder)
 			       gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
-		spin_lock(&gsnedf.release_lock);
+		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(holder)) {
 			TRACE_TASK(holder, "%s: is queued\n",
 				   __FUNCTION__);
@@ -648,7 +648,7 @@ static void update_queue_position(struct task_struct *holder)
 		TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
 			   __FUNCTION__);
 	}
-	spin_unlock(&gsnedf.release_lock);
+	raw_spin_unlock(&gsnedf.release_lock);
 
 	/* If holder was enqueued in a release heap, then the following
 	 * preemption check is pointless, but we can't easily detect
@@ -682,7 +682,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
 		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
@@ -694,7 +694,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			sem->holder->rt_param.inh_task = new_waiter;
 			update_queue_position(sem->holder);
 		}
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 
 	if (t->rt_param.inh_task) {
 		/* interrupts already disabled by PI code */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 
 		/* Reset inh_task to NULL. */
 		t->rt_param.inh_task = NULL;
@@ -748,7 +748,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 		/* Check if rescheduling is necessary */
 		unlink(t);
 		gsnedf_job_arrival(t);
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return ret;
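One convention runs through the hunks: the _irqsave variants are used where the caller's interrupt state is unknown (the release, wake-up, block, and exit paths), while plain raw_spin_lock() is used where interrupts are already disabled, as the comments in gsnedf_pi_block() and gsnedf_return_priority() note. A hedged sketch of that convention, with hypothetical demo_outer/demo_inner locks not taken from this file:

static DEFINE_RAW_SPINLOCK(demo_outer);
static DEFINE_RAW_SPINLOCK(demo_inner);

static void demo_nested(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_outer, flags);  /* IRQ state unknown here */
	raw_spin_lock(&demo_inner);                 /* IRQs already off: plain lock */
	raw_spin_unlock(&demo_inner);
	raw_spin_unlock_irqrestore(&demo_outer, flags);
}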