Diffstat (limited to 'litmus')
-rw-r--r--  litmus/sched_gsn_edf.c | 322
1 file changed, 322 insertions, 0 deletions
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 5de0980e3faa..c525d43eb051 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -11,6 +11,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -446,6 +447,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (entry->linked) {
 		entry->linked->rt_param.scheduled_on = entry->cpu;
 		next = entry->linked;
+		TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
 	}
 	if (entry->scheduled) {
 		/* not gonna be scheduled soon */
@@ -600,6 +602,323 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 	return 0;
 }
 
+#ifdef CONFIG_LITMUS_LOCKING
+
+#include <litmus/fdso.h>
+
+/* called with IRQs off */
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	int linked_on;
+	int check_preempt = 0;
+
+	raw_spin_lock(&gsnedf_lock);
+
+	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	tsk_rt(t)->inh_task = prio_inh;
+
+	linked_on = tsk_rt(t)->linked_on;
+
+	/* If it is scheduled, then we need to reorder the CPU heap. */
+	if (linked_on != NO_CPU) {
+		TRACE_TASK(t, "%s: linked on %d\n",
+			   __FUNCTION__, linked_on);
+		/* Holder is scheduled; need to re-order CPUs.
+		 * We can't use heap_decrease() here since
+		 * the cpu_heap is ordered in reverse direction, so
+		 * it is actually an increase. */
+		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
+			     gsnedf_cpus[linked_on]->hn);
+		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
+			     gsnedf_cpus[linked_on]->hn);
+	} else {
+		/* holder may be queued: first stop queue changes */
+		raw_spin_lock(&gsnedf.release_lock);
+		if (is_queued(t)) {
+			TRACE_TASK(t, "%s: is queued\n",
+				   __FUNCTION__);
+			/* We need to update the position of holder in some
+			 * heap. Note that this could be a release heap if
+			 * budget enforcement is used and this job overran. */
+			check_preempt =
+				!bheap_decrease(edf_ready_order,
+						tsk_rt(t)->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is either sleeping or currently being moved
+			 * by other code (e.g., a timer interrupt handler) that
+			 * will use the correct priority when enqueuing the
+			 * task. */
+			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
+				   __FUNCTION__);
+		}
+		raw_spin_unlock(&gsnedf.release_lock);
+
+		/* If holder was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			bheap_uncache_min(edf_ready_order,
+					  &gsnedf.ready_queue);
+			check_for_preemptions();
+		}
+	}
+
+	raw_spin_unlock(&gsnedf_lock);
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	raw_spin_lock(&gsnedf_lock);
+
+	/* A job only stops inheriting a priority when it releases a
+	 * resource. Thus we can make the following assumption. */
+	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
+
+	TRACE_TASK(t, "priority restored\n");
+	tsk_rt(t)->inh_task = NULL;
+
+	/* Check if rescheduling is necessary. We can't use heap_decrease()
+	 * since the priority was effectively lowered. */
+	unlink(t);
+	gsnedf_job_arrival(t);
+
+	raw_spin_unlock(&gsnedf_lock);
+}
+
+
+/* ******************** FMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct fmlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t wait;
+};
+
+static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct fmlp_semaphore, litmus_lock);
+}
+
+/* caller is responsible for locking */
+struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
+				   struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &sem->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
+
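+/* Acquire the FMLP semaphore. If it is already held, the caller is appended
+ * to the FIFO wait queue and suspends until ownership is passed to it. */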
+int gsnedf_fmlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_waitqueue_entry(&wait, t);
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, sem->hp_waiter)) {
+			sem->hp_waiter = t;
+			if (edf_higher_prio(t, sem->owner))
+				set_priority_inheritance(sem->owner, sem->hp_waiter);
+		}
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release.
+		 */
+
+		schedule();
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+
+		remove_wait_queue(&sem->wait, &wait);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+
+	return 0;
+}
+
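+/* Release the FMLP semaphore: pass ownership to the first FIFO waiter (if
+ * any), update hp_waiter, and drop any inherited priority. */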
+int gsnedf_fmlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* check if there are jobs waiting for this resource */
+	next = waitqueue_first(&sem->wait);
+	if (next) {
+		/* next becomes the resource holder */
+		sem->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		/* determine new hp_waiter if necessary */
+		if (next == sem->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			sem->hp_waiter = find_hp_waiter(sem, next);
+			if (sem->hp_waiter)
+				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			set_priority_inheritance(next, sem->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	} else
+		/* becomes available */
+		sem->owner = NULL;
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->inh_task)
+		clear_priority_inheritance(t);
+
+out:
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	return err;
+}
+
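+/* If the task closing this lock still holds it, release it on its behalf. */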
+int gsnedf_fmlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	if (owner)
+		gsnedf_fmlp_unlock(l);
+
+	return 0;
+}
+
+void gsnedf_fmlp_free(struct litmus_lock* lock)
+{
+	kfree(fmlp_from_lock(lock));
+}
+
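+/* Operations that hook the FMLP semaphore into the generic litmus_lock layer. */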
+static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
+	.close = gsnedf_fmlp_close,
+	.lock = gsnedf_fmlp_lock,
+	.unlock = gsnedf_fmlp_unlock,
+	.deallocate = gsnedf_fmlp_free,
+};
+
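+/* Allocate and initialize a new FMLP semaphore. */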
+static struct litmus_lock* gsnedf_new_fmlp(void)
+{
+	struct fmlp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner = NULL;
+	sem->hp_waiter = NULL;
+	init_waitqueue_head(&sem->wait);
+	sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;
+
+	return &sem->litmus_lock;
+}
+
+/* **** lock constructor **** */
+
+
+static long gsnedf_allocate_lock(struct litmus_lock **lock, int type)
+{
+	int err = -ENXIO;
+
+	/* GSN-EDF currently only supports the FMLP for global resources. */
+	switch (type) {
+
+	case FMLP_SEM:
+		/* Flexible Multiprocessor Locking Protocol */
+		*lock = gsnedf_new_fmlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
+	};
+
+	return err;
+}
+
+#endif
+
+
 static long gsnedf_activate_plugin(void)
 {
 	int cpu;
@@ -642,6 +961,9 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 	.task_block = gsnedf_task_block,
 	.admit_task = gsnedf_admit_task,
 	.activate_plugin = gsnedf_activate_plugin,
+#ifdef CONFIG_LITMUS_LOCKING
+	.allocate_lock = gsnedf_allocate_lock,
+#endif
 };
 
 