author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-04-19 10:42:46 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-04-19 10:42:46 -0400
commit		37c715bc201539cbb1b8ebe80b05d05d20211443 (patch)
tree		b8dda9221ee75eaa66d97dc00b3b25d8449e4be9
parent		5fd924b4177fff6904137fae17c8d79b9aeda570 (diff)
Remove FMLP support from C-EDF.
There is no published protocol for FMLP/C-EDF.
-rw-r--r--	litmus/sched_cedf.c	163
1 file changed, 0 insertions(+), 163 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 4c22e26241d9..c715ec1e0ee2 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -582,163 +582,6 @@ static void cedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP\n");
 }
 
-#ifdef CONFIG_FMLP
-
-/* Update the queue position of a task that got its priority boosted via
- * priority inheritance. */
-static void update_queue_position(struct task_struct *holder)
-{
-	/* We don't know whether holder is in the ready queue. It should, but
-	 * on a budget overrun it may already be in a release queue. Hence,
-	 * calling unlink() is not possible since it assumes that the task is
-	 * not in a release queue. However, we can safely check whether
-	 * sem->holder is currently in a queue or scheduled after locking both
-	 * the release and the ready queue lock. */
-
-	/* Assumption: caller holds cedf_lock */
-
-	int check_preempt = 0;
-	cedf_domain_t *cluster = task_cpu_cluster(holder);
-
-	if (tsk_rt(holder)->linked_on != NO_CPU) {
-		TRACE_TASK(holder, "%s: linked on %d\n",
-			   __FUNCTION__, tsk_rt(holder)->linked_on);
-		/* Holder is scheduled; need to re-order CPUs.
-		 * We can't use heap_decrease() here since
-		 * the cpu_heap is ordered in reverse direction, so
-		 * it is actually an increase. */
-		bheap_delete(cpu_lower_prio, &cluster->cpu_heap,
-			     cluster->cpus[tsk_rt(holder)->linked_on]->hn);
-		bheap_insert(cpu_lower_prio, &cluster->cpu_heap,
-			     cluster->cpus[tsk_rt(holder)->linked_on]->hn);
-	} else {
-		/* holder may be queued: first stop queue changes */
-		spin_lock(&cluster->domain.release_lock);
-		if (is_queued(holder)) {
-			TRACE_TASK(holder, "%s: is queued\n",
-				   __FUNCTION__);
-			/* We need to update the position
-			 * of holder in some heap. Note that this
-			 * may be a release heap. */
-			check_preempt =
-				!bheap_decrease(edf_ready_order,
-						tsk_rt(holder)->heap_node);
-		} else {
-			/* Nothing to do: if it is not queued and not linked
-			 * then it is currently being moved by other code
-			 * (e.g., a timer interrupt handler) that will use the
-			 * correct priority when enqueuing the task. */
-			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
-				   __FUNCTION__);
-		}
-		spin_unlock(&cluster->domain.release_lock);
-
-		/* If holder was enqueued in a release heap, then the following
-		 * preemption check is pointless, but we can't easily detect
-		 * that case. If you want to fix this, then consider that
-		 * simply adding a state flag requires O(n) time to update when
-		 * releasing n tasks, which conflicts with the goal to have
-		 * O(log n) merges. */
-		if (check_preempt) {
-			/* heap_decrease() hit the top level of the heap: make
-			 * sure preemption checks get the right task, not the
-			 * potentially stale cache. */
-			bheap_uncache_min(edf_ready_order,
-					  &cluster->domain.ready_queue);
-			check_for_preemptions(cluster);
-		}
-	}
-}
-
-static long cedf_pi_block(struct pi_semaphore *sem,
-			  struct task_struct *new_waiter)
-{
-	/* This callback has to handle the situation where a new waiter is
-	 * added to the wait queue of the semaphore.
-	 *
-	 * We must check if it has a higher priority than the currently
-	 * highest-priority task, and then potentially reschedule.
-	 */
-
-	BUG_ON(!new_waiter);
-
-	if (edf_higher_prio(new_waiter, sem->hp.task)) {
-		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
-		/* called with IRQs disabled */
-		spin_lock(&task_cpu_cluster(new_waiter)->lock);
-		/* store new highest-priority task */
-		sem->hp.task = new_waiter;
-		if (sem->holder) {
-			TRACE_TASK(sem->holder,
-				   " holds %p and will inherit from %s/%d\n",
-				   sem,
-				   new_waiter->comm, new_waiter->pid);
-			/* let holder inherit */
-			sem->holder->rt_param.inh_task = new_waiter;
-			update_queue_position(sem->holder);
-		}
-		spin_unlock(&task_cpu_cluster(new_waiter)->lock);
-	}
-
-	return 0;
-}
-
-static long cedf_inherit_priority(struct pi_semaphore *sem,
-				  struct task_struct *new_owner)
-{
-	/* We don't need to acquire the cedf_lock since at the time of this
-	 * call new_owner isn't actually scheduled yet (it's still sleeping)
-	 * and since the calling function already holds sem->wait.lock, which
-	 * prevents concurrent sem->hp.task changes.
-	 */
-
-	if (sem->hp.task && sem->hp.task != new_owner) {
-		new_owner->rt_param.inh_task = sem->hp.task;
-		TRACE_TASK(new_owner, "inherited priority from %s/%d\n",
-			   sem->hp.task->comm, sem->hp.task->pid);
-	} else
-		TRACE_TASK(new_owner,
-			   "cannot inherit priority, "
-			   "no higher priority job waits.\n");
-	return 0;
-}
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long cedf_return_priority(struct pi_semaphore *sem)
-{
-	struct task_struct* t = current;
-	int ret = 0;
-
-	/* Find new highest-priority semaphore task
-	 * if holder task is the current hp.task.
-	 *
-	 * Calling function holds sem->wait.lock.
-	 */
-	if (t == sem->hp.task)
-		edf_set_hp_task(sem);
-
-	TRACE_CUR("cedf_return_priority for lock %p\n", sem);
-
-	if (t->rt_param.inh_task) {
-		/* interrupts already disabled by PI code */
-		spin_lock(&task_cpu_cluster(t)->lock);
-
-		/* Reset inh_task to NULL. */
-		t->rt_param.inh_task = NULL;
-
-		/* Check if rescheduling is necessary */
-		unlink(t);
-		cedf_job_arrival(t);
-		spin_unlock(&task_cpu_cluster(t)->lock);
-	}
-
-	return ret;
-}
-
-#endif
-
 static long cedf_admit_task(struct task_struct* tsk)
 {
 	return 0;
@@ -893,12 +736,6 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
 	.schedule = cedf_schedule,
 	.task_wake_up = cedf_task_wake_up,
 	.task_block = cedf_task_block,
-#ifdef CONFIG_FMLP
-	.fmlp_active = 1,
-	.pi_block = cedf_pi_block,
-	.inherit_priority = cedf_inherit_priority,
-	.return_priority = cedf_return_priority,
-#endif
 	.admit_task = cedf_admit_task,
 	.activate_plugin = cedf_activate_plugin,
 	.deactivate_plugin = cedf_deactivate_plugin,
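
The removed update_queue_position() turns on a binary-heap subtlety that its inline comment only hints at: bheap_decrease() can only move a node toward the root in the heap's own order, and the per-cluster cpu_heap is ordered with the lowest-priority CPU at the top, so boosting the priority of a linked task is effectively a key increase there and has to be handled by deleting and re-inserting the CPU's heap node. The following stand-alone sketch shows that delete-and-re-insert re-keying pattern on a plain array min-heap instead of LITMUS^RT's bheap; heap_rekey(), heap_delete_at(), and the other helpers are hypothetical names for illustration, not kernel API.

#include <stdio.h>

#define HEAP_CAP 64

/* Array-based min-heap keyed by int; the smallest key sits at the root. */
struct heap {
	int key[HEAP_CAP];
	int n;
};

static void swap_keys(struct heap *h, int a, int b)
{
	int tmp = h->key[a];
	h->key[a] = h->key[b];
	h->key[b] = tmp;
}

static void sift_up(struct heap *h, int i)
{
	while (i > 0 && h->key[i] < h->key[(i - 1) / 2]) {
		swap_keys(h, i, (i - 1) / 2);
		i = (i - 1) / 2;
	}
}

static void sift_down(struct heap *h, int i)
{
	for (;;) {
		int l = 2 * i + 1, r = 2 * i + 2, m = i;
		if (l < h->n && h->key[l] < h->key[m])
			m = l;
		if (r < h->n && h->key[r] < h->key[m])
			m = r;
		if (m == i)
			return;
		swap_keys(h, i, m);
		i = m;
	}
}

static void heap_insert(struct heap *h, int key)
{
	int i = h->n++;
	h->key[i] = key;
	sift_up(h, i);
}

static void heap_delete_at(struct heap *h, int i)
{
	h->key[i] = h->key[--h->n];	/* overwrite with the last element */
	if (i < h->n) {
		/* The moved key may violate the heap property in either
		 * direction, so restore it both ways. */
		sift_up(h, i);
		sift_down(h, i);
	}
}

/* Re-key one node. A pure key decrease could use sift_up() alone (which
 * is what bheap_decrease() does), but a key increase cannot, hence the
 * delete-and-re-insert, as in the removed update_queue_position(). */
static void heap_rekey(struct heap *h, int i, int new_key)
{
	heap_delete_at(h, i);
	heap_insert(h, new_key);
}

int main(void)
{
	struct heap h = { .n = 0 };

	heap_insert(&h, 30);
	heap_insert(&h, 10);
	heap_insert(&h, 20);
	heap_rekey(&h, 1, 5);	/* re-key the entry in slot 1 (key 30) */
	printf("min key: %d\n", h.key[0]);	/* prints "min key: 5" */
	return 0;
}

Deleting and re-inserting costs two O(log n) operations but works for key changes in either direction, which is why the removed code used the bheap_delete()/bheap_insert() pair rather than a dedicated "increase" primitive.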
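Likewise, the removed cedf_pi_block()/cedf_inherit_priority()/cedf_return_priority() trio implements the usual priority-inheritance bookkeeping: the semaphore tracks its highest-priority waiter in sem->hp.task, the lock holder inherits that waiter's priority through rt_param.inh_task, and the inheritance is cleared again on release. The user-space sketch below shows just that state machine under simplified assumptions (single semaphore, no locking or rescheduling); struct task, struct pi_sem, eff_deadline(), and earlier_deadline() are hypothetical stand-ins for task_struct, pi_semaphore, and edf_higher_prio(), not real kernel interfaces.

#include <stdio.h>

/* Hypothetical stand-ins for task_struct and pi_semaphore. */
struct task {
	const char *name;
	unsigned long deadline;	/* absolute deadline; smaller = higher priority */
	struct task *inh_task;	/* task whose priority is inherited, or NULL */
};

struct pi_sem {
	struct task *holder;	/* current lock holder */
	struct task *hp_waiter;	/* highest-priority waiter (sem->hp.task) */
};

/* Effective deadline: an inheriting task runs with the inherited deadline. */
static unsigned long eff_deadline(const struct task *t)
{
	return t->inh_task ? t->inh_task->deadline : t->deadline;
}

/* Analogue of edf_higher_prio(): earlier effective deadline wins;
 * any task beats "no task at all". */
static int earlier_deadline(const struct task *a, const struct task *b)
{
	return !b || eff_deadline(a) < eff_deadline(b);
}

/* Analogue of cedf_pi_block(): a new waiter blocks on the semaphore. */
static void pi_block(struct pi_sem *sem, struct task *waiter)
{
	if (earlier_deadline(waiter, sem->hp_waiter)) {
		sem->hp_waiter = waiter;		/* new highest-priority waiter */
		if (sem->holder)
			sem->holder->inh_task = waiter;	/* holder inherits */
	}
}

/* Analogue of cedf_return_priority(): the holder releases the lock and
 * drops any inherited priority; the scheduler would re-evaluate here. */
static void return_priority(struct task *t)
{
	t->inh_task = NULL;
}

int main(void)
{
	struct task low = { "low", 100, NULL };
	struct task high = { "high", 10, NULL };
	struct pi_sem sem = { &low, NULL };	/* low already holds the lock */

	pi_block(&sem, &high);
	printf("%s runs with effective deadline %lu\n", low.name, eff_deadline(&low));
	return_priority(&low);
	printf("%s runs with effective deadline %lu\n", low.name, eff_deadline(&low));
	return 0;
}

Run, this prints an effective deadline of 10 while the inheritance is in place and 100 after it is returned; in the real plugin that same transition is what prompted the unlink()/cedf_job_arrival() calls to re-evaluate scheduling.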