Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--	litmus/sched_gsn_edf.c	|	36	++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
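The change below converts every use of gsnedf_lock and gsnedf.release_lock from the spinlock_t API to the raw_spinlock_t API introduced with the mainline spinlock rename in 2.6.33. A raw spinlock always busy-waits with preemption disabled (and, in the _irqsave form, with local interrupts disabled), which is what a scheduler-internal lock taken from timer and interrupt context requires; under PREEMPT_RT, a plain spinlock_t can become a sleeping lock. A minimal sketch of the pattern, assuming a standalone example_lock that merely stands in for the file's gsnedf_lock:

#include <linux/spinlock.h>

/* stand-in for the scheduler lock used in this file */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_release_path(void)
{
	unsigned long flags;

	/* spins until acquired; never sleeps, even on PREEMPT_RT */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... touch shared scheduler state, e.g. merge ready jobs ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}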
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b9310dd6f75c..7424c183d8b2 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	__merge_ready(rt, tasks);
 	check_for_preemptions();
 
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 /* caller holds gsnedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (gsnedf.release_master == entry->cpu)
 		return NULL;
 
-	spin_lock(&gsnedf_lock);
+	raw_spin_lock(&gsnedf_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -469,7 +469,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&gsnedf_lock);
+	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -507,7 +507,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("gsn edf: task new %d\n", t->pid);
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -530,7 +530,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	gsnedf_job_arrival(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -540,7 +540,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -563,7 +563,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	gsnedf_job_arrival(task);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
@@ -573,9 +573,9 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -586,13 +586,13 @@ static void gsnedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -628,7 +628,7 @@ static void update_queue_position(struct task_struct *holder)
 			   gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
-		spin_lock(&gsnedf.release_lock);
+		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(holder)) {
 			TRACE_TASK(holder, "%s: is queued\n",
 				   __FUNCTION__);
@@ -646,7 +646,7 @@ static void update_queue_position(struct task_struct *holder)
 			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
-		spin_unlock(&gsnedf.release_lock);
+		raw_spin_unlock(&gsnedf.release_lock);
 
 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
@@ -680,7 +680,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
 		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
@@ -692,7 +692,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			sem->holder->rt_param.inh_task = new_waiter;
 			update_queue_position(sem->holder);
 		}
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return 0;
@@ -738,7 +738,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 
 	if (t->rt_param.inh_task) {
 		/* interrupts already disabled by PI code */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 
 		/* Reset inh_task to NULL. */
 		t->rt_param.inh_task = NULL;
@@ -746,7 +746,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 		/* Check if rescheduling is necessary */
 		unlink(t);
 		gsnedf_job_arrival(t);
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return ret;
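Note the two flavors used above: paths that may be entered with interrupts enabled (job release, task arrival, wake-up, block, exit) take the lock with raw_spin_lock_irqsave(), while paths whose callers already run with interrupts off (see the in-code comments "called with IRQs disabled" and "interrupts already disabled by PI code") use the plain raw_spin_lock(). A short sketch of that nested convention, with example_inner_update and example_lock as illustrative names:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

/* Caller guarantees local interrupts are already disabled,
 * e.g. by an enclosing raw_spin_lock_irqsave() section, so
 * saving and restoring the IRQ flags again would be redundant. */
static void example_inner_update(void)
{
	raw_spin_lock(&example_lock);
	/* ... update priority-inheritance state ... */
	raw_spin_unlock(&example_lock);
}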