author    Mac Mollison <mollison@cs.unc.edu>  2010-10-05 13:41:40 -0400
committer Mac Mollison <mollison@cs.unc.edu>  2010-10-05 13:41:40 -0400
commit    58ea2a8251ef43a8475ecf0601d97fc5644648f7 (patch)
tree      cf785047c093acb9b4c46092a13998c884d47420
parent    33823919c84ec7bea79b673aa24157506091256e (diff)

Remove all #ifdef FMLP sections

 -rw-r--r--  litmus/sched_mcrit.c | 162
 1 file changed, 0 insertions(+), 162 deletions(-)
diff --git a/litmus/sched_mcrit.c b/litmus/sched_mcrit.c
index 40c5a95457b7..0d26b3b158da 100644
--- a/litmus/sched_mcrit.c
+++ b/litmus/sched_mcrit.c
@@ -646,162 +646,6 @@ static void mcrit_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP\n");
 }
 
-#ifdef CONFIG_FMLP
-
-/* Update the queue position of a task that got its priority boosted via
- * priority inheritance. */
-static void update_queue_position(struct task_struct *holder)
-{
-	/* We don't know whether holder is in the ready queue. It should, but
-	 * on a budget overrun it may already be in a release queue. Hence,
-	 * calling unlink() is not possible since it assumes that the task is
-	 * not in a release queue. However, we can safely check whether
-	 * sem->holder is currently in a queue or scheduled after locking both
-	 * the release and the ready queue lock. */
-
-	/* Assumption: caller holds mcrit_lock */
-
-	int check_preempt = 0;
-
-	if (tsk_rt(holder)->linked_on != NO_CPU) {
-		TRACE_TASK(holder, "%s: linked on %d\n",
-			   __FUNCTION__, tsk_rt(holder)->linked_on);
-		/* Holder is scheduled; need to re-order CPUs.
-		 * We can't use heap_decrease() here since
-		 * the cpu_heap is ordered in reverse direction, so
-		 * it is actually an increase. */
-		bheap_delete(cpu_lower_prio, &mcrit_cpu_heap,
-			     mcrit_cpus[tsk_rt(holder)->linked_on]->hn);
-		bheap_insert(cpu_lower_prio, &mcrit_cpu_heap,
-			     mcrit_cpus[tsk_rt(holder)->linked_on]->hn);
-	} else {
-		/* holder may be queued: first stop queue changes */
-		raw_spin_lock(&mcrit.release_lock);
-		if (is_queued(holder)) {
-			TRACE_TASK(holder, "%s: is queued\n",
-				   __FUNCTION__);
-			/* We need to update the position
-			 * of holder in some heap. Note that this
-			 * may be a release heap. */
-			check_preempt =
-				!bheap_decrease(mcrit_edf_ready_order,
-						tsk_rt(holder)->heap_node);
-		} else {
-			/* Nothing to do: if it is not queued and not linked
-			 * then it is currently being moved by other code
-			 * (e.g., a timer interrupt handler) that will use the
-			 * correct priority when enqueuing the task. */
-			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
-				   __FUNCTION__);
-		}
-		raw_spin_unlock(&mcrit.release_lock);
-
-		/* If holder was enqueued in a release heap, then the following
-		 * preemption check is pointless, but we can't easily detect
-		 * that case. If you want to fix this, then consider that
-		 * simply adding a state flag requires O(n) time to update when
-		 * releasing n tasks, which conflicts with the goal to have
-		 * O(log n) merges. */
-		if (check_preempt) {
-			/* heap_decrease() hit the top level of the heap: make
-			 * sure preemption checks get the right task, not the
-			 * potentially stale cache. */
-			bheap_uncache_min(mcrit_edf_ready_order,
-					  &mcrit.ready_queue);
-			check_for_preemptions();
-		}
-	}
-}
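A note on the heap manipulation above: the CPU heap is ordered by cpu_lower_prio, i.e. the comment says it is "ordered in reverse direction", so a priority boost moves the affected node in exactly the direction that a decrease-only helper cannot repair. Deleting the node and re-inserting it is the general fix for an arbitrary key change. Below is a minimal userspace sketch of that delete + re-insert pattern on a toy max-heap; it is illustrative only and does not use the kernel's bheap API.

	/* Toy max-heap: illustrates why an arbitrary key change is handled
	 * as delete + re-insert rather than a one-directional helper. */
	#include <stdio.h>

	#define CAP 16

	struct heap { int key[CAP]; int n; };

	static void swap(int *a, int *b) { int t = *a; *a = *b; *b = t; }

	/* Move entry i toward the root while it beats its parent. */
	static void sift_up(struct heap *h, int i)
	{
		while (i > 0 && h->key[i] > h->key[(i - 1) / 2]) {
			swap(&h->key[i], &h->key[(i - 1) / 2]);
			i = (i - 1) / 2;
		}
	}

	/* Move entry i toward the leaves while a child beats it. */
	static void sift_down(struct heap *h, int i)
	{
		for (;;) {
			int l = 2 * i + 1, r = l + 1, m = i;
			if (l < h->n && h->key[l] > h->key[m])
				m = l;
			if (r < h->n && h->key[r] > h->key[m])
				m = r;
			if (m == i)
				return;
			swap(&h->key[i], &h->key[m]);
			i = m;
		}
	}

	static void heap_add(struct heap *h, int k)
	{
		h->key[h->n] = k;
		sift_up(h, h->n++);
	}

	/* Remove entry i: overwrite it with the tail, then repair in
	 * whichever direction is needed. Correct for any key change,
	 * unlike a pure decrease. */
	static void heap_del(struct heap *h, int i)
	{
		h->key[i] = h->key[--h->n];
		if (i < h->n) {
			sift_up(h, i);
			sift_down(h, i);
		}
	}

	int main(void)
	{
		struct heap h = { .n = 0 };
		heap_add(&h, 30);
		heap_add(&h, 20);
		heap_add(&h, 10);

		/* "Change" entry 1's key from 20 to 40: a decrease-only
		 * helper could not fix this; delete + re-insert can. */
		heap_del(&h, 1);
		heap_add(&h, 40);

		printf("top = %d\n", h.key[0]);	/* prints 40 */
		return 0;
	}

Delete-plus-reinsert costs two O(log n) operations instead of one, but it is correct regardless of which way the key moved, which is why the removed code used bheap_delete() followed by bheap_insert().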
715 | |||
716 | static long mcrit_pi_block(struct pi_semaphore *sem, | ||
717 | struct task_struct *new_waiter) | ||
718 | { | ||
719 | /* This callback has to handle the situation where a new waiter is | ||
720 | * added to the wait queue of the semaphore. | ||
721 | * | ||
722 | * We must check if has a higher priority than the currently | ||
723 | * highest-priority task, and then potentially reschedule. | ||
724 | */ | ||
725 | |||
726 | BUG_ON(!new_waiter); | ||
727 | |||
728 | if (mcrit_edf_higher_prio(new_waiter, sem->hp.task)) { | ||
729 | TRACE_TASK(new_waiter, " boosts priority via %p\n", sem); | ||
730 | /* called with IRQs disabled */ | ||
731 | raw_spin_lock(&mcrit_lock); | ||
732 | /* store new highest-priority task */ | ||
733 | sem->hp.task = new_waiter; | ||
734 | if (sem->holder) { | ||
735 | TRACE_TASK(sem->holder, | ||
736 | " holds %p and will inherit from %s/%d\n", | ||
737 | sem, | ||
738 | new_waiter->comm, new_waiter->pid); | ||
739 | /* let holder inherit */ | ||
740 | sem->holder->rt_param.inh_task = new_waiter; | ||
741 | update_queue_position(sem->holder); | ||
742 | } | ||
743 | raw_spin_unlock(&mcrit_lock); | ||
744 | } | ||
745 | |||
746 | return 0; | ||
747 | } | ||
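To make the control flow of the callback above easier to follow outside the kernel, here is a hedged userspace sketch of the same decision. The types toy_task, toy_sem, and the helpers are invented stand-ins, not the pi_semaphore API: a newly blocked waiter only matters if it outranks the semaphore's cached highest-priority waiter, in which case the current holder inherits it.

	#include <stdio.h>
	#include <stddef.h>

	/* Invented stand-ins for task_struct / pi_semaphore. */
	struct toy_task {
		const char *name;
		long deadline;		/* EDF: earlier deadline = higher prio */
		struct toy_task *inh;	/* inheritance source, like inh_task */
	};

	struct toy_sem {
		struct toy_task *holder;	/* current lock owner */
		struct toy_task *hp;		/* cached top-priority waiter */
	};

	/* EDF-style comparison; a NULL contender never wins. */
	static int higher_prio(struct toy_task *a, struct toy_task *b)
	{
		return a && (!b || a->deadline < b->deadline);
	}

	/* Mirrors the shape of mcrit_pi_block: record the new top waiter
	 * and let the holder inherit its priority (locking omitted). */
	static void toy_pi_block(struct toy_sem *sem, struct toy_task *new_waiter)
	{
		if (higher_prio(new_waiter, sem->hp)) {
			sem->hp = new_waiter;
			if (sem->holder)
				sem->holder->inh = new_waiter;
		}
	}

	int main(void)
	{
		struct toy_task owner  = { "owner",  100, NULL };
		struct toy_task urgent = { "urgent",  10, NULL };
		struct toy_sem sem = { .holder = &owner, .hp = NULL };

		toy_pi_block(&sem, &urgent);
		printf("%s inherits from %s\n", owner.name, owner.inh->name);
		return 0;
	}

In the real callback the boost must also be made visible to the scheduler, which is what the update_queue_position() call handles.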
748 | |||
749 | static long mcrit_inherit_priority(struct pi_semaphore *sem, | ||
750 | struct task_struct *new_owner) | ||
751 | { | ||
752 | /* We don't need to acquire the mcrit_lock since at the time of this | ||
753 | * call new_owner isn't actually scheduled yet (it's still sleeping) | ||
754 | * and since the calling function already holds sem->wait.lock, which | ||
755 | * prevents concurrent sem->hp.task changes. | ||
756 | */ | ||
757 | |||
758 | if (sem->hp.task && sem->hp.task != new_owner) { | ||
759 | new_owner->rt_param.inh_task = sem->hp.task; | ||
760 | TRACE_TASK(new_owner, "inherited priority from %s/%d\n", | ||
761 | sem->hp.task->comm, sem->hp.task->pid); | ||
762 | } else | ||
763 | TRACE_TASK(new_owner, | ||
764 | "cannot inherit priority, " | ||
765 | "no higher priority job waits.\n"); | ||
766 | return 0; | ||
767 | } | ||
768 | |||
769 | /* This function is called on a semaphore release, and assumes that | ||
770 | * the current task is also the semaphore holder. | ||
771 | */ | ||
772 | static long mcrit_return_priority(struct pi_semaphore *sem) | ||
773 | { | ||
774 | struct task_struct* t = current; | ||
775 | int ret = 0; | ||
776 | |||
777 | /* Find new highest-priority semaphore task | ||
778 | * if holder task is the current hp.task. | ||
779 | * | ||
780 | * Calling function holds sem->wait.lock. | ||
781 | */ | ||
782 | if (t == sem->hp.task) | ||
783 | edf_set_hp_task(sem); | ||
784 | |||
785 | TRACE_CUR("mcrit_return_priority for lock %p\n", sem); | ||
786 | |||
787 | if (t->rt_param.inh_task) { | ||
788 | /* interrupts already disabled by PI code */ | ||
789 | raw_spin_lock(&mcrit_lock); | ||
790 | |||
791 | /* Reset inh_task to NULL. */ | ||
792 | t->rt_param.inh_task = NULL; | ||
793 | |||
794 | /* Check if rescheduling is necessary */ | ||
795 | unlink(t); | ||
796 | mcrit_job_arrival(t); | ||
797 | raw_spin_unlock(&mcrit_lock); | ||
798 | } | ||
799 | |||
800 | return ret; | ||
801 | } | ||
802 | |||
803 | #endif | ||
804 | |||
805 | static long mcrit_admit_task(struct task_struct* tsk) | 649 | static long mcrit_admit_task(struct task_struct* tsk) |
806 | { | 650 | { |
807 | return 0; | 651 | return 0; |
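Continuing the toy model from the sketch after mcrit_pi_block (same invented types), the last two removed callbacks pair up: inherit_priority hands the cached top waiter's priority to the new owner at acquisition time, and return_priority sheds it at release time so the scheduler re-evaluates the task at its base priority (the real code does that via unlink() plus mcrit_job_arrival()). The rescan that edf_set_hp_task() performs over the wait queue is simplified here to clearing the cache.

	/* Mirrors mcrit_inherit_priority: the new owner takes on the cached
	 * highest-priority waiter, unless it already is that waiter. */
	static void toy_inherit(struct toy_sem *sem, struct toy_task *new_owner)
	{
		if (sem->hp && sem->hp != new_owner)
			new_owner->inh = sem->hp;
	}

	/* Mirrors mcrit_return_priority: on release, drop the cached top
	 * waiter if it was us, and shed any inherited priority. */
	static void toy_return(struct toy_sem *sem, struct toy_task *t)
	{
		if (t == sem->hp)
			sem->hp = NULL;	/* real code rescans the wait queue */
		if (t->inh)
			t->inh = NULL;	/* real code then unlinks + re-queues t */
	}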
@@ -848,12 +692,6 @@ static struct sched_plugin m_crit_plugin __cacheline_aligned_in_smp = {
 	.schedule = mcrit_schedule,
 	.task_wake_up = mcrit_task_wake_up,
 	.task_block = mcrit_task_block,
-#ifdef CONFIG_FMLP
-	.fmlp_active = 1,
-	.pi_block = mcrit_pi_block,
-	.inherit_priority = mcrit_inherit_priority,
-	.return_priority = mcrit_return_priority,
-#endif
 	.admit_task = mcrit_admit_task,
 	.activate_plugin = mcrit_activate_plugin,
 };
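For reference, the right-hand side of the hunk above implies the plugin descriptor that remains after the patch. Reconstructed below; fields before .schedule are not visible in the hunk and are elided.

	static struct sched_plugin m_crit_plugin __cacheline_aligned_in_smp = {
		/* ... fields not shown in the hunk ... */
		.schedule        = mcrit_schedule,
		.task_wake_up    = mcrit_task_wake_up,
		.task_block      = mcrit_task_block,
		.admit_task      = mcrit_admit_task,
		.activate_plugin = mcrit_activate_plugin,
	};

With the FMLP callbacks gone, the plugin no longer sets .fmlp_active, so the FMLP layer treats this plugin as not supporting priority inheritance.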