Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r-- | litmus/sched_gsn_edf.c | 2645 |
1 file changed, 106 insertions, 2539 deletions
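Most of this change deletes the plugin-local RSM mutex and IKGLP implementations from sched_gsn_edf.c and rewires GSN-EDF to the generic versions now pulled in via <litmus/rsm_lock.h> and <litmus/ikglp_lock.h>. For orientation, the sketch below shows the delegation pattern adopted by the new hunks for the RSM mutex: the ops-table fields and rsm_mutex_* entry points are copied from the new side of the diff, while the surrounding context (required headers, exact signatures) is assumed rather than taken from the patched tree.

/* Sketch only -- not part of the patch. GSN-EDF now supplies an ops table
 * whose entries are the shared rsm_mutex_* functions and lets rsm_mutex_new()
 * allocate the lock object; nothing lock-specific remains in the plugin. */
static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = {
	.lock       = rsm_mutex_lock,
	.unlock     = rsm_mutex_unlock,
	.close      = rsm_mutex_close,
	.deallocate = rsm_mutex_free,
	.propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance,
	.propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance,
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.dgl_lock        = rsm_mutex_dgl_lock,
	.is_owner        = rsm_mutex_is_owner,
	.enable_priority = rsm_mutex_enable_priority,
#endif
};

static struct litmus_lock* gsnedf_new_rsm_mutex(void)
{
	return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops);
}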
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c0316c4a1b35..59236d007fd8 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -29,6 +29,11 @@ | |||
29 | #include <litmus/bheap.h> | 29 | #include <litmus/bheap.h> |
30 | #include <litmus/binheap.h> | 30 | #include <litmus/binheap.h> |
31 | 31 | ||
32 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
33 | #include <litmus/rsm_lock.h> | ||
34 | #include <litmus/ikglp_lock.h> | ||
35 | #endif | ||
36 | |||
32 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 37 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
33 | #include <litmus/affinity.h> | 38 | #include <litmus/affinity.h> |
34 | #endif | 39 | #endif |
@@ -122,6 +127,11 @@ static rt_domain_t gsnedf; | |||
122 | 127 | ||
123 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 128 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
124 | static raw_spinlock_t dgl_lock; | 129 | static raw_spinlock_t dgl_lock; |
130 | |||
131 | static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t) | ||
132 | { | ||
133 | return(&dgl_lock); | ||
134 | } | ||
125 | #endif | 135 | #endif |
126 | 136 | ||
127 | /* Uncomment this if you want to see all scheduling decisions in the | 137 | /* Uncomment this if you want to see all scheduling decisions in the |
@@ -133,7 +143,7 @@ static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) | |||
133 | { | 143 | { |
134 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); | 144 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); |
135 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); | 145 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); |
136 | 146 | ||
137 | /* Note that a and b are inverted: we want the lowest-priority CPU at | 147 | /* Note that a and b are inverted: we want the lowest-priority CPU at |
138 | * the top of the heap. | 148 | * the top of the heap. |
139 | */ | 149 | */ |
@@ -645,29 +655,37 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
645 | static long gsnedf_admit_task(struct task_struct* tsk) | 655 | static long gsnedf_admit_task(struct task_struct* tsk) |
646 | { | 656 | { |
647 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 657 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
648 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, edf_max_heap_base_priority_order); | 658 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, |
659 | edf_max_heap_base_priority_order); | ||
649 | #endif | 660 | #endif |
650 | 661 | ||
651 | return 0; | 662 | return 0; |
652 | } | 663 | } |
653 | 664 | ||
654 | #ifdef CONFIG_LITMUS_LOCKING | ||
655 | 665 | ||
656 | extern raw_spinlock_t rsm_global_lock; | 666 | |
667 | |||
668 | |||
669 | |||
670 | #ifdef CONFIG_LITMUS_LOCKING | ||
657 | 671 | ||
658 | #include <litmus/fdso.h> | 672 | #include <litmus/fdso.h> |
659 | 673 | ||
660 | /* called with IRQs off */ | 674 | /* called with IRQs off */ |
661 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 675 | static void increase_priority_inheritance(struct task_struct* t, |
676 | struct task_struct* prio_inh) | ||
662 | { | 677 | { |
663 | int linked_on; | 678 | int linked_on; |
664 | int check_preempt = 0; | 679 | int check_preempt = 0; |
665 | 680 | ||
666 | raw_spin_lock(&gsnedf_lock); | 681 | raw_spin_lock(&gsnedf_lock); |
667 | 682 | ||
683 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
668 | /* this sanity check allows for weaker locking in protocols */ | 684 | /* this sanity check allows for weaker locking in protocols */ |
669 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { | 685 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { |
670 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 686 | #endif |
687 | TRACE_TASK(t, "inherits priority from %s/%d\n", | ||
688 | prio_inh->comm, prio_inh->pid); | ||
671 | tsk_rt(t)->inh_task = prio_inh; | 689 | tsk_rt(t)->inh_task = prio_inh; |
672 | 690 | ||
673 | linked_on = tsk_rt(t)->linked_on; | 691 | linked_on = tsk_rt(t)->linked_on; |
@@ -721,6 +739,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
721 | check_for_preemptions(); | 739 | check_for_preemptions(); |
722 | } | 740 | } |
723 | } | 741 | } |
742 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
724 | } | 743 | } |
725 | else { | 744 | else { |
726 | TRACE_TASK(t, "Spurious invalid priority increase. " | 745 | TRACE_TASK(t, "Spurious invalid priority increase. " |
@@ -730,28 +749,33 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
730 | effective_priority(t)->comm, effective_priority(t)->pid, | 749 | effective_priority(t)->comm, effective_priority(t)->pid, |
731 | prio_inh->comm, prio_inh->pid); | 750 | prio_inh->comm, prio_inh->pid); |
732 | } | 751 | } |
752 | #endif | ||
733 | 753 | ||
734 | raw_spin_unlock(&gsnedf_lock); | 754 | raw_spin_unlock(&gsnedf_lock); |
735 | } | 755 | } |
736 | 756 | ||
737 | /* called with IRQs off */ | 757 | /* called with IRQs off */ |
738 | static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 758 | static void decrease_priority_inheritance(struct task_struct* t, |
759 | struct task_struct* prio_inh) | ||
739 | { | 760 | { |
740 | raw_spin_lock(&gsnedf_lock); | 761 | raw_spin_lock(&gsnedf_lock); |
741 | 762 | ||
763 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
742 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 764 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
765 | #endif | ||
743 | /* A job only stops inheriting a priority when it releases a | 766 | /* A job only stops inheriting a priority when it releases a |
744 | * resource. Thus we can make the following assumption.*/ | 767 | * resource. Thus we can make the following assumption.*/ |
745 | if(prio_inh) | 768 | if(prio_inh) |
746 | TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); | 769 | TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", |
770 | prio_inh->comm, prio_inh->pid); | ||
747 | else | 771 | else |
748 | TRACE_TASK(t, "base priority restored.\n"); | 772 | TRACE_TASK(t, "base priority restored.\n"); |
749 | 773 | ||
750 | tsk_rt(t)->inh_task = prio_inh; | 774 | tsk_rt(t)->inh_task = prio_inh; |
751 | 775 | ||
752 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | 776 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
753 | TRACE_TASK(t, "is scheduled.\n"); | 777 | TRACE_TASK(t, "is scheduled.\n"); |
754 | 778 | ||
755 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 779 | /* Check if rescheduling is necessary. We can't use heap_decrease() |
756 | * since the priority was effectively lowered. */ | 780 | * since the priority was effectively lowered. */ |
757 | unlink(t); | 781 | unlink(t); |
@@ -762,7 +786,7 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
762 | raw_spin_lock(&gsnedf.release_lock); | 786 | raw_spin_lock(&gsnedf.release_lock); |
763 | if (is_queued(t)) { | 787 | if (is_queued(t)) { |
764 | TRACE_TASK(t, "is queued.\n"); | 788 | TRACE_TASK(t, "is queued.\n"); |
765 | 789 | ||
766 | /* decrease in priority, so we have to re-add to binomial heap */ | 790 | /* decrease in priority, so we have to re-add to binomial heap */ |
767 | unlink(t); | 791 | unlink(t); |
768 | gsnedf_job_arrival(t); | 792 | gsnedf_job_arrival(t); |
@@ -772,6 +796,7 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
772 | } | 796 | } |
773 | raw_spin_unlock(&gsnedf.release_lock); | 797 | raw_spin_unlock(&gsnedf.release_lock); |
774 | } | 798 | } |
799 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
775 | } | 800 | } |
776 | else { | 801 | else { |
777 | TRACE_TASK(t, "Spurious invalid priority decrease. " | 802 | TRACE_TASK(t, "Spurious invalid priority decrease. " |
@@ -782,8 +807,8 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
782 | (prio_inh) ? prio_inh->comm : "nil", | 807 | (prio_inh) ? prio_inh->comm : "nil", |
783 | (prio_inh) ? prio_inh->pid : -1); | 808 | (prio_inh) ? prio_inh->pid : -1); |
784 | } | 809 | } |
810 | #endif | ||
785 | 811 | ||
786 | |||
787 | raw_spin_unlock(&gsnedf_lock); | 812 | raw_spin_unlock(&gsnedf_lock); |
788 | } | 813 | } |
789 | 814 | ||
@@ -792,96 +817,15 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
792 | 817 | ||
793 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 818 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
794 | 819 | ||
795 | |||
796 | void print_hp_waiters(struct binheap_node* n, int depth) | ||
797 | { | ||
798 | struct litmus_lock *l; | ||
799 | struct nested_info *nest; | ||
800 | char padding[81] = " "; | ||
801 | struct task_struct *hp = NULL; | ||
802 | struct task_struct *hp_eff = NULL; | ||
803 | struct task_struct *node_prio = NULL; | ||
804 | |||
805 | |||
806 | if(n == NULL) { | ||
807 | TRACE("+-> %p\n", NULL); | ||
808 | return; | ||
809 | } | ||
810 | |||
811 | nest = binheap_entry(n, struct nested_info, hp_binheap_node); | ||
812 | l = nest->lock; | ||
813 | |||
814 | if(depth*2 <= 80) | ||
815 | padding[depth*2] = '\0'; | ||
816 | |||
817 | if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) { | ||
818 | hp = *(nest->hp_waiter_ptr); | ||
819 | |||
820 | if(tsk_rt(hp)->inh_task) { | ||
821 | hp_eff = tsk_rt(hp)->inh_task; | ||
822 | } | ||
823 | } | ||
824 | |||
825 | node_prio = nest->hp_waiter_eff_prio; | ||
826 | |||
827 | TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", | ||
828 | padding, | ||
829 | (node_prio) ? node_prio->comm : "nil", | ||
830 | (node_prio) ? node_prio->pid : -1, | ||
831 | (hp) ? hp->comm : "nil", | ||
832 | (hp) ? hp->pid : -1, | ||
833 | (hp_eff) ? hp_eff->comm : "nil", | ||
834 | (hp_eff) ? hp_eff->pid : -1, | ||
835 | l->ident); | ||
836 | |||
837 | if(n->left) print_hp_waiters(n->left, depth+1); | ||
838 | if(n->right) print_hp_waiters(n->right, depth+1); | ||
839 | } | ||
840 | |||
841 | void dump_node_data(struct binheap_node* parent, struct binheap_node* child) | ||
842 | { | ||
843 | struct binheap_node *root = (parent != BINHEAP_POISON) ? parent : child; | ||
844 | struct binheap_node *bad_node = (parent == BINHEAP_POISON) ? parent : child; | ||
845 | struct nested_info *nest; | ||
846 | |||
847 | while(root->parent != NULL) { | ||
848 | root = root->parent; | ||
849 | } | ||
850 | |||
851 | if(parent == BINHEAP_POISON) { | ||
852 | TRACE_CUR("parent was bad node.\n"); | ||
853 | } | ||
854 | else { | ||
855 | TRACE_CUR("child was bad node.\n"); | ||
856 | } | ||
857 | TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); | ||
858 | |||
859 | nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); | ||
860 | TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); | ||
861 | |||
862 | print_hp_waiters(root, 1); | ||
863 | } | ||
864 | |||
865 | void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node) | ||
866 | { | ||
867 | struct binheap_node *root = handle->root; | ||
868 | struct nested_info *nest; | ||
869 | |||
870 | TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); | ||
871 | |||
872 | nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); | ||
873 | TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); | ||
874 | |||
875 | print_hp_waiters(root, 1); | ||
876 | } | ||
877 | |||
878 | |||
879 | /* called with IRQs off */ | 820 | /* called with IRQs off */ |
880 | /* preconditions: | 821 | /* preconditions: |
881 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | 822 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. |
882 | (2) The lock 'to_unlock' is held. | 823 | (2) The lock 'to_unlock' is held. |
883 | */ | 824 | */ |
884 | static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | 825 | static void nested_increase_priority_inheritance(struct task_struct* t, |
826 | struct task_struct* prio_inh, | ||
827 | raw_spinlock_t *to_unlock, | ||
828 | unsigned long irqflags) | ||
885 | { | 829 | { |
886 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | 830 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
887 | 831 | ||
@@ -890,17 +834,21 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t | |||
890 | } | 834 | } |
891 | 835 | ||
892 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | 836 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
893 | 837 | ||
894 | 838 | ||
895 | if(blocked_lock) { | 839 | if(blocked_lock) { |
896 | if(blocked_lock->ops->propagate_increase_inheritance) { | 840 | if(blocked_lock->ops->propagate_increase_inheritance) { |
897 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); | 841 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", |
898 | 842 | blocked_lock->ident); | |
899 | // beware: recursion | 843 | |
900 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, t, to_unlock, irqflags); | 844 | // beware: recursion |
845 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, | ||
846 | t, to_unlock, | ||
847 | irqflags); | ||
901 | } | 848 | } |
902 | else { | 849 | else { |
903 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); | 850 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", |
851 | blocked_lock->ident); | ||
904 | unlock_fine_irqrestore(to_unlock, irqflags); | 852 | unlock_fine_irqrestore(to_unlock, irqflags); |
905 | } | 853 | } |
906 | } | 854 | } |
@@ -915,22 +863,29 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t | |||
915 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | 863 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. |
916 | (2) The lock 'to_unlock' is held. | 864 | (2) The lock 'to_unlock' is held. |
917 | */ | 865 | */ |
918 | static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | 866 | static void nested_decrease_priority_inheritance(struct task_struct* t, |
867 | struct task_struct* prio_inh, | ||
868 | raw_spinlock_t *to_unlock, | ||
869 | unsigned long irqflags) | ||
919 | { | 870 | { |
920 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | 871 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
921 | decrease_priority_inheritance(t, prio_inh); | 872 | decrease_priority_inheritance(t, prio_inh); |
922 | 873 | ||
923 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | 874 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
924 | 875 | ||
925 | if(blocked_lock) { | 876 | if(blocked_lock) { |
926 | if(blocked_lock->ops->propagate_decrease_inheritance) { | 877 | if(blocked_lock->ops->propagate_decrease_inheritance) { |
927 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); | 878 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", |
928 | 879 | blocked_lock->ident); | |
880 | |||
929 | // beware: recursion | 881 | // beware: recursion |
930 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, to_unlock, irqflags); | 882 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, |
931 | } | 883 | to_unlock, |
884 | irqflags); | ||
885 | } | ||
932 | else { | 886 | else { |
933 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); | 887 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", |
888 | blocked_lock); | ||
934 | unlock_fine_irqrestore(to_unlock, irqflags); | 889 | unlock_fine_irqrestore(to_unlock, irqflags); |
935 | } | 890 | } |
936 | } | 891 | } |
@@ -943,2440 +898,46 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, struct t | |||
943 | 898 | ||
944 | /* ******************** RSM MUTEX ********************** */ | 899 | /* ******************** RSM MUTEX ********************** */ |
945 | 900 | ||
946 | /* struct for semaphore with priority inheritance */ | 901 | static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { |
947 | struct rsm_mutex { | 902 | .lock = rsm_mutex_lock, |
948 | struct litmus_lock litmus_lock; | 903 | .unlock = rsm_mutex_unlock, |
949 | 904 | .close = rsm_mutex_close, | |
950 | /* current resource holder */ | 905 | .deallocate = rsm_mutex_free, |
951 | struct task_struct *owner; | ||
952 | |||
953 | /* highest-priority waiter */ | ||
954 | struct task_struct *hp_waiter; | ||
955 | |||
956 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ | ||
957 | wait_queue_head_t wait; | ||
958 | |||
959 | /* we do some nesting within spinlocks, so we can't use the normal | ||
960 | sleeplocks found in wait_queue_head_t. */ | ||
961 | raw_spinlock_t lock; | ||
962 | }; | ||
963 | |||
964 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | ||
965 | { | ||
966 | return container_of(lock, struct rsm_mutex, litmus_lock); | ||
967 | } | ||
968 | |||
969 | /* caller is responsible for locking */ | ||
970 | struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, | ||
971 | struct task_struct* skip) | ||
972 | { | ||
973 | wait_queue_t *q; | ||
974 | struct list_head *pos; | ||
975 | struct task_struct *queued = NULL, *found = NULL; | ||
976 | |||
977 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
978 | dgl_wait_state_t *dgl_wait = NULL; | ||
979 | #endif | ||
980 | |||
981 | list_for_each(pos, &mutex->wait.task_list) { | ||
982 | q = list_entry(pos, wait_queue_t, task_list); | ||
983 | |||
984 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
985 | if(q->func == dgl_wake_up) { | ||
986 | dgl_wait = (dgl_wait_state_t*) q->private; | ||
987 | if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { | ||
988 | queued = dgl_wait->task; | ||
989 | } | ||
990 | else { | ||
991 | queued = NULL; // skip it. | ||
992 | } | ||
993 | } | ||
994 | else { | ||
995 | queued = (struct task_struct*) q->private; | ||
996 | } | ||
997 | #else | ||
998 | queued = (struct task_struct*) q->private; | ||
999 | #endif | ||
1000 | |||
1001 | /* Compare task prios, find high prio task. */ | ||
1002 | if (queued && queued != skip && edf_higher_prio(queued, found)) { | ||
1003 | found = queued; | ||
1004 | } | ||
1005 | } | ||
1006 | return found; | ||
1007 | } | ||
1008 | |||
1009 | static inline struct task_struct* top_priority(struct binheap_handle* handle) { | ||
1010 | if(!binheap_empty(handle)) { | ||
1011 | return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); | ||
1012 | } | ||
1013 | return NULL; | ||
1014 | } | ||
1015 | |||
1016 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1017 | //static void gsnedf_rsm_mutex_reserve(struct litmus_lock *l, unsigned long *irqflags) | ||
1018 | //{ | ||
1019 | // struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1020 | // raw_spin_lock_irqsave(&mutex->lock, *irqflags); | ||
1021 | //} | ||
1022 | // | ||
1023 | //static void gsnedf_rsm_mutex_unreserve(struct litmus_lock *l, unsigned long irqflags) | ||
1024 | //{ | ||
1025 | // struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1026 | // raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1027 | //} | ||
1028 | |||
1029 | static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t) | ||
1030 | { | ||
1031 | return(&dgl_lock); | ||
1032 | } | ||
1033 | |||
1034 | static int gsn_edf_rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) | ||
1035 | { | ||
1036 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1037 | return(mutex->owner == t); | ||
1038 | } | ||
1039 | |||
1040 | |||
1041 | // return 1 if resource was immediately acquired. | ||
1042 | // Assumes mutex->lock is held. | ||
1043 | // Must set task state to TASK_UNINTERRUPTIBLE if task blocks. | ||
1044 | static int gsn_edf_rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node) | ||
1045 | { | ||
1046 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1047 | struct task_struct *t = dgl_wait->task; | ||
1048 | |||
1049 | int acquired_immediatly = 0; | ||
1050 | |||
1051 | BUG_ON(t != current); | ||
1052 | |||
1053 | if (mutex->owner) { | ||
1054 | TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); | ||
1055 | |||
1056 | init_dgl_waitqueue_entry(wq_node, dgl_wait); | ||
1057 | |||
1058 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1059 | __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); | ||
1060 | } else { | ||
1061 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1062 | |||
1063 | /* it's ours now */ | ||
1064 | mutex->owner = t; | ||
1065 | |||
1066 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1067 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1068 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1069 | |||
1070 | acquired_immediatly = 1; | ||
1071 | } | ||
1072 | |||
1073 | return acquired_immediatly; | ||
1074 | } | ||
1075 | |||
1076 | // Assumes mutex->lock is held. | ||
1077 | static void gsn_edf_rsm_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait) | ||
1078 | { | ||
1079 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1080 | struct task_struct *t = dgl_wait->task; | ||
1081 | struct task_struct *owner = mutex->owner; | ||
1082 | unsigned long flags = 0; // these are unused under DGL coarse-grain locking | ||
1083 | |||
1084 | BUG_ON(owner == t); | ||
1085 | |||
1086 | tsk_rt(t)->blocked_lock = l; | ||
1087 | mb(); | ||
1088 | |||
1089 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
1090 | |||
1091 | struct task_struct *old_max_eff_prio; | ||
1092 | struct task_struct *new_max_eff_prio; | ||
1093 | struct task_struct *new_prio = NULL; | ||
1094 | |||
1095 | if(mutex->hp_waiter) | ||
1096 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
1097 | else | ||
1098 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
1099 | |||
1100 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1101 | |||
1102 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
1103 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1104 | |||
1105 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1106 | |||
1107 | mutex->hp_waiter = t; | ||
1108 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1109 | |||
1110 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1111 | |||
1112 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1113 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1114 | |||
1115 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1116 | |||
1117 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1118 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1119 | |||
1120 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1121 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
1122 | new_prio = new_max_eff_prio; | ||
1123 | } | ||
1124 | } | ||
1125 | else { | ||
1126 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1127 | } | ||
1128 | |||
1129 | //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1130 | |||
1131 | if(new_prio) { | ||
1132 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
1133 | } | ||
1134 | else { | ||
1135 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1136 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1137 | } | ||
1138 | } | ||
1139 | else { | ||
1140 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1141 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1142 | } | ||
1143 | } | ||
1144 | #endif | ||
1145 | |||
1146 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | ||
1147 | { | ||
1148 | struct task_struct *t = current; | ||
1149 | struct task_struct *owner; | ||
1150 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1151 | wait_queue_t wait; | ||
1152 | unsigned long flags; | ||
1153 | |||
1154 | if (!is_realtime(t)) | ||
1155 | return -EPERM; | ||
1156 | |||
1157 | |||
1158 | lock_global_irqsave(&dgl_lock, flags); | ||
1159 | lock_fine_irqsave(&mutex->lock, flags); | ||
1160 | |||
1161 | if (mutex->owner) { | ||
1162 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); | ||
1163 | |||
1164 | /* resource is not free => must suspend and wait */ | ||
1165 | |||
1166 | owner = mutex->owner; | ||
1167 | |||
1168 | init_waitqueue_entry(&wait, t); | ||
1169 | |||
1170 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ | ||
1171 | mb(); // needed? | ||
1172 | |||
1173 | /* FIXME: interruptible would be nice some day */ | ||
1174 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1175 | |||
1176 | __add_wait_queue_tail_exclusive(&mutex->wait, &wait); | ||
1177 | |||
1178 | /* check if we need to activate priority inheritance */ | ||
1179 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
1180 | |||
1181 | struct task_struct *old_max_eff_prio; | ||
1182 | struct task_struct *new_max_eff_prio; | ||
1183 | struct task_struct *new_prio = NULL; | ||
1184 | |||
1185 | if(mutex->hp_waiter) | ||
1186 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
1187 | else | ||
1188 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
1189 | |||
1190 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1191 | |||
1192 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
1193 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1194 | |||
1195 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1196 | |||
1197 | mutex->hp_waiter = t; | ||
1198 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1199 | |||
1200 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1201 | |||
1202 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1203 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1204 | |||
1205 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1206 | |||
1207 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1208 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1209 | |||
1210 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1211 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
1212 | new_prio = new_max_eff_prio; | ||
1213 | } | ||
1214 | } | ||
1215 | else { | ||
1216 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1217 | } | ||
1218 | |||
1219 | if(new_prio) { | ||
1220 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
1221 | } | ||
1222 | else { | ||
1223 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1224 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1225 | } | ||
1226 | } | ||
1227 | else { | ||
1228 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1229 | |||
1230 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1231 | } | ||
1232 | |||
1233 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1234 | |||
1235 | TS_LOCK_SUSPEND; | ||
1236 | |||
1237 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1238 | * when we wake up; we are guaranteed to have the lock since | ||
1239 | * there is only one wake up per release. | ||
1240 | */ | ||
1241 | |||
1242 | schedule(); | ||
1243 | |||
1244 | TS_LOCK_RESUME; | ||
1245 | |||
1246 | /* Since we hold the lock, no other task will change | ||
1247 | * ->owner. We can thus check it without acquiring the spin | ||
1248 | * lock. */ | ||
1249 | BUG_ON(mutex->owner != t); | ||
1250 | |||
1251 | TRACE_TASK(t, "Acquired lock %d.\n", l->ident); | ||
1252 | |||
1253 | } else { | ||
1254 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1255 | |||
1256 | /* it's ours now */ | ||
1257 | mutex->owner = t; | ||
1258 | |||
1259 | raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
1260 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1261 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
1262 | |||
1263 | |||
1264 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1265 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1266 | } | ||
1267 | |||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | |||
1272 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1273 | void select_next_lock_if_primary(struct litmus_lock *l, dgl_wait_state_t *dgl_wait) | ||
1274 | { | ||
1275 | if(tsk_rt(dgl_wait->task)->blocked_lock == l) { | ||
1276 | TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
1277 | tsk_rt(dgl_wait->task)->blocked_lock = NULL; | ||
1278 | mb(); | ||
1279 | select_next_lock(dgl_wait, l); // pick the next lock to be blocked on | ||
1280 | } | ||
1281 | else { | ||
1282 | TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
1283 | } | ||
1284 | } | ||
1285 | #endif | ||
1286 | |||
1287 | |||
1288 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | ||
1289 | { | ||
1290 | struct task_struct *t = current, *next = NULL; | ||
1291 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1292 | unsigned long flags; | ||
1293 | |||
1294 | struct task_struct *old_max_eff_prio; | ||
1295 | |||
1296 | int wake_up_task = 1; | ||
1297 | |||
1298 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1299 | dgl_wait_state_t *dgl_wait = NULL; | ||
1300 | #endif | ||
1301 | |||
1302 | int err = 0; | ||
1303 | |||
1304 | lock_global_irqsave(&dgl_lock, flags); | ||
1305 | lock_fine_irqsave(&mutex->lock, flags); | ||
1306 | |||
1307 | |||
1308 | if (mutex->owner != t) { | ||
1309 | err = -EINVAL; | ||
1310 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1311 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1312 | return err; | ||
1313 | } | ||
1314 | |||
1315 | |||
1316 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1317 | |||
1318 | TRACE_TASK(t, "Freeing lock %d\n", l->ident); | ||
1319 | //TRACE_TASK(t, "Heap Before:\n"); | ||
1320 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1321 | |||
1322 | //old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node)->hp_waiter_ptr); | ||
1323 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1324 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); | ||
1325 | |||
1326 | //raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1327 | |||
1328 | //TRACE_TASK(t, "Heap After:\n"); | ||
1329 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1330 | |||
1331 | if(tsk_rt(t)->inh_task){ | ||
1332 | struct task_struct *new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1333 | |||
1334 | if((new_max_eff_prio == NULL) || | ||
1335 | ( (new_max_eff_prio != old_max_eff_prio) /* there was a change in eff prio */ && | ||
1336 | (effective_priority(t) == old_max_eff_prio) /* and owner had the old eff prio */) ) | ||
1337 | { | ||
1338 | // old_max_eff_prio > new_max_eff_prio | ||
1339 | |||
1340 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { | ||
1341 | TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", | ||
1342 | new_max_eff_prio->comm, new_max_eff_prio->pid, | ||
1343 | t->comm, t->pid, tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1344 | WARN_ON(1); | ||
1345 | } | ||
1346 | |||
1347 | decrease_priority_inheritance(t, new_max_eff_prio); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && tsk_rt(t)->inh_task != NULL) | ||
1352 | { | ||
1353 | WARN_ON(tsk_rt(t)->inh_task != NULL); | ||
1354 | TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", | ||
1355 | tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1356 | } | ||
1357 | |||
1358 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1359 | |||
1360 | |||
1361 | /* check if there are jobs waiting for this resource */ | ||
1362 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1363 | __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); | ||
1364 | if(dgl_wait) { | ||
1365 | next = dgl_wait->task; | ||
1366 | //select_next_lock_if_primary(l, dgl_wait); | ||
1367 | } | ||
1368 | #else | ||
1369 | next = __waitqueue_remove_first(&mutex->wait); | ||
1370 | #endif | ||
1371 | if (next) { | ||
1372 | /* next becomes the resource holder */ | ||
1373 | mutex->owner = next; | ||
1374 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | ||
1375 | |||
1376 | // if(tsk_rt(next)->blocked_lock == &mutex->litmus_lock) { // might be false for DGL. | ||
1377 | // tsk_rt(next)->blocked_lock = NULL; | ||
1378 | // mb(); | ||
1379 | // } | ||
1380 | |||
1381 | /* determine new hp_waiter if necessary */ | ||
1382 | if (next == mutex->hp_waiter) { | ||
1383 | |||
1384 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1385 | /* next has the highest priority --- it doesn't need to | ||
1386 | * inherit. However, we need to make sure that the | ||
1387 | * next-highest priority in the queue is reflected in | ||
1388 | * hp_waiter. */ | ||
1389 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); | ||
1390 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1391 | |||
1392 | if (mutex->hp_waiter) | ||
1393 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | ||
1394 | else | ||
1395 | TRACE("no further waiters\n"); | ||
1396 | |||
1397 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1398 | |||
1399 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1400 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1401 | |||
1402 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1403 | |||
1404 | //TRACE_TASK(next, "Heap After:\n"); | ||
1405 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1406 | |||
1407 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1408 | if(dgl_wait) { | ||
1409 | select_next_lock_if_primary(l, dgl_wait); | ||
1410 | //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
1411 | --(dgl_wait->nr_remaining); | ||
1412 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
1413 | } | ||
1414 | #endif | ||
1415 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1416 | } | ||
1417 | else { | ||
1418 | /* Well, if 'next' is not the highest-priority waiter, | ||
1419 | * then it (probably) ought to inherit the highest-priority | ||
1420 | * waiter's priority. */ | ||
1421 | TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); | ||
1422 | |||
1423 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1424 | |||
1425 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1426 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1427 | |||
1428 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | ||
1429 | struct nested_info, hp_binheap_node); | ||
1430 | |||
1431 | |||
1432 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1433 | if(dgl_wait) { | ||
1434 | select_next_lock_if_primary(l, dgl_wait); | ||
1435 | // wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
1436 | --(dgl_wait->nr_remaining); | ||
1437 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
1438 | } | ||
1439 | #endif | ||
1440 | |||
1441 | //TRACE_TASK(next, "Heap After:\n"); | ||
1442 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1443 | |||
1444 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
1445 | * because that update hasn't yet executed (update operation is | ||
1446 | * probably blocked on mutex->lock). So only inherit if the top of | ||
1447 | * 'next's top heap node is indeed the effective prio. of hp_waiter. | ||
1448 | * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
1449 | * since the effective priority of hp_waiter can change (and the | ||
1450 | * update has not made it to this lock).) | ||
1451 | */ | ||
1452 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1453 | if((l->nest.hp_waiter_eff_prio != NULL) && (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) | ||
1454 | { | ||
1455 | if(dgl_wait && tsk_rt(next)->blocked_lock) { | ||
1456 | BUG_ON(wake_up_task); | ||
1457 | if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { | ||
1458 | nested_increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. | ||
1459 | goto out; // all spinlocks are released. bail out now. | ||
1460 | } | ||
1461 | } | ||
1462 | else { | ||
1463 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); | ||
1464 | } | ||
1465 | } | ||
1466 | |||
1467 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1468 | #else | ||
1469 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) | ||
1470 | { | ||
1471 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); | ||
1472 | } | ||
1473 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1474 | #endif | ||
1475 | } | ||
1476 | |||
1477 | if(wake_up_task) { | ||
1478 | TRACE_TASK(next, "waking up since it is no longer blocked.\n"); | ||
1479 | |||
1480 | tsk_rt(next)->blocked_lock = NULL; | ||
1481 | mb(); | ||
1482 | |||
1483 | wake_up_process(next); | ||
1484 | } | ||
1485 | else { | ||
1486 | TRACE_TASK(next, "is still blocked.\n"); | ||
1487 | } | ||
1488 | } | ||
1489 | else { | ||
1490 | /* becomes available */ | ||
1491 | mutex->owner = NULL; | ||
1492 | } | ||
1493 | |||
1494 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1495 | |||
1496 | out: | ||
1497 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1498 | |||
1499 | return err; | ||
1500 | } | ||
1501 | |||
1502 | |||
1503 | void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
1504 | struct task_struct* t, | ||
1505 | raw_spinlock_t* to_unlock, | ||
1506 | unsigned long irqflags) | ||
1507 | { | ||
1508 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1509 | |||
1510 | // relay-style locking | ||
1511 | lock_fine(&mutex->lock); | ||
1512 | unlock_fine(to_unlock); | ||
1513 | |||
1514 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | ||
1515 | struct task_struct *owner = mutex->owner; | ||
1516 | |||
1517 | struct task_struct *old_max_eff_prio; | ||
1518 | struct task_struct *new_max_eff_prio; | ||
1519 | |||
1520 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1521 | |||
1522 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1523 | |||
1524 | if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { | ||
1525 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); | ||
1526 | mutex->hp_waiter = t; | ||
1527 | } | ||
1528 | if(t == mutex->hp_waiter) { | ||
1529 | // reflect the decreased priority in the heap node. | ||
1530 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1531 | |||
1532 | BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); | ||
1533 | BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks)); | ||
1534 | |||
1535 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1536 | } | ||
1537 | |||
1538 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1539 | |||
1540 | |||
1541 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1542 | // new_max_eff_prio > old_max_eff_prio holds. | ||
1543 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1544 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { | ||
1545 | |||
1546 | TRACE_CUR("Propagating inheritance to holder of lock %d.\n", l->ident); | ||
1547 | |||
1548 | // beware: recursion | ||
1549 | nested_increase_priority_inheritance(owner, new_max_eff_prio, &mutex->lock, irqflags); // unlocks mutex->lock | ||
1550 | } | ||
1551 | else { | ||
1552 | TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); | ||
1553 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1554 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1555 | } | ||
1556 | } | ||
1557 | else { | ||
1558 | TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); | ||
1559 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1560 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1561 | } | ||
1562 | } | ||
1563 | else { | ||
1564 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1565 | |||
1566 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1567 | if(still_blocked) { | ||
1568 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1569 | if(still_blocked->ops->propagate_increase_inheritance) { | ||
1570 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1571 | we know that task 't' has not released any locks behind us in this | ||
1572 | chain. Propagation just needs to catch up with task 't'. */ | ||
1573 | still_blocked->ops->propagate_increase_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1574 | } | ||
1575 | else { | ||
1576 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1577 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1578 | } | ||
1579 | } | ||
1580 | else { | ||
1581 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1582 | } | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | |||
1587 | void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
1588 | struct task_struct* t, | ||
1589 | raw_spinlock_t* to_unlock, | ||
1590 | unsigned long irqflags) | ||
1591 | { | ||
1592 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1593 | |||
1594 | // relay-style locking | ||
1595 | lock_fine(&mutex->lock); | ||
1596 | unlock_fine(to_unlock); | ||
1597 | |||
1598 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | ||
1599 | if(t == mutex->hp_waiter) { | ||
1600 | struct task_struct *owner = mutex->owner; | ||
1601 | |||
1602 | struct task_struct *old_max_eff_prio; | ||
1603 | struct task_struct *new_max_eff_prio; | ||
1604 | |||
1605 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1606 | |||
1607 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1608 | |||
1609 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1610 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
1611 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1612 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1613 | |||
1614 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1615 | |||
1616 | if((old_max_eff_prio != new_max_eff_prio) && | ||
1617 | (effective_priority(owner) == old_max_eff_prio)) | ||
1618 | { | ||
1619 | // Need to set new effective_priority for owner | ||
1620 | |||
1621 | struct task_struct *decreased_prio; | ||
1622 | |||
1623 | TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", l->ident); | ||
1624 | |||
1625 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
1626 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", | ||
1627 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
1628 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
1629 | owner->comm, | ||
1630 | owner->pid, | ||
1631 | l->ident); | ||
1632 | |||
1633 | decreased_prio = new_max_eff_prio; | ||
1634 | } | ||
1635 | else { | ||
1636 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", | ||
1637 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
1638 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
1639 | owner->comm, | ||
1640 | owner->pid, | ||
1641 | l->ident); | ||
1642 | |||
1643 | decreased_prio = NULL; | ||
1644 | } | ||
1645 | |||
1646 | // beware: recursion | ||
1647 | nested_decrease_priority_inheritance(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | ||
1648 | } | ||
1649 | else { | ||
1650 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1651 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1652 | } | ||
1653 | } | ||
1654 | else { | ||
1655 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | ||
1656 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1657 | } | ||
1658 | } | ||
1659 | else { | ||
1660 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1661 | |||
1662 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1663 | if(still_blocked) { | ||
1664 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1665 | if(still_blocked->ops->propagate_decrease_inheritance) { | ||
1666 | /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1667 | we know that task 't' has not released any locks behind us in this | ||
1668 | chain. propagation just needs to catch up with task 't' */ | ||
1669 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1670 | } | ||
1671 | else { | ||
1672 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1673 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1674 | } | ||
1675 | } | ||
1676 | else { | ||
1677 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1678 | } | ||
1679 | } | ||
1680 | } | ||
1681 | |||
1682 | |||
1683 | |||
1684 | int gsnedf_rsm_mutex_close(struct litmus_lock* l) | ||
1685 | { | ||
1686 | struct task_struct *t = current; | ||
1687 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1688 | unsigned long flags; | ||
1689 | |||
1690 | int owner; | ||
1691 | |||
1692 | |||
1693 | lock_global_irqsave(&dgl_lock, flags); | ||
1694 | lock_fine_irqsave(&mutex->lock, flags); | ||
1695 | |||
1696 | owner = (mutex->owner == t); | ||
1697 | |||
1698 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1699 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1700 | |||
1701 | if (owner) | ||
1702 | gsnedf_rsm_mutex_unlock(l); | ||
1703 | |||
1704 | return 0; | ||
1705 | } | ||
1706 | 906 | ||
1707 | void gsnedf_rsm_mutex_free(struct litmus_lock* lock) | 907 | .propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance, |
1708 | { | 908 | .propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance, |
1709 | kfree(rsm_mutex_from_lock(lock)); | ||
1710 | } | ||
1711 | 909 | ||
1712 | static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { | ||
1713 | .close = gsnedf_rsm_mutex_close, | ||
1714 | .lock = gsnedf_rsm_mutex_lock, | ||
1715 | .unlock = gsnedf_rsm_mutex_unlock, | ||
1716 | .deallocate = gsnedf_rsm_mutex_free, | ||
1717 | .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, | ||
1718 | .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance, | ||
1719 | |||
1720 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 910 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
1721 | // .reserve = gsnedf_rsm_mutex_reserve, | 911 | .dgl_lock = rsm_mutex_dgl_lock, |
1722 | // .unreserve = gsnedf_rsm_mutex_unreserve, | 912 | .is_owner = rsm_mutex_is_owner, |
1723 | .dgl_lock = gsn_edf_rsm_mutex_dgl_lock, | 913 | .enable_priority = rsm_mutex_enable_priority, |
1724 | .is_owner = gsn_edf_rsm_mutex_is_owner, | ||
1725 | .enable_priority = gsn_edf_rsm_enable_priority, | ||
1726 | #endif | 914 | #endif |
1727 | }; | 915 | }; |
1728 | 916 | ||
1729 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) | 917 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) |
1730 | { | 918 | { |
1731 | struct rsm_mutex* mutex; | 919 | return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops); |
1732 | |||
1733 | mutex = kmalloc(sizeof(*mutex), GFP_KERNEL); | ||
1734 | if (!mutex) | ||
1735 | return NULL; | ||
1736 | |||
1737 | mutex->owner = NULL; | ||
1738 | mutex->hp_waiter = NULL; | ||
1739 | init_waitqueue_head(&mutex->wait); | ||
1740 | |||
1741 | |||
1742 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
1743 | { | ||
1744 | __raw_spin_lock_init(&mutex->lock, ((struct litmus_lock*)mutex)->cheat_lockdep, &((struct litmus_lock*)mutex)->key); | ||
1745 | } | ||
1746 | #else | ||
1747 | raw_spin_lock_init(&mutex->lock); | ||
1748 | #endif | ||
1749 | |||
1750 | mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; | ||
1751 | |||
1752 | ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; | ||
1753 | |||
1754 | return &mutex->litmus_lock; | ||
1755 | } | 920 | } |
1756 | 921 | ||
1757 | /* **** lock constructor **** */ | ||
1758 | |||
1759 | |||
1760 | |||
1761 | |||
1762 | |||
1763 | |||
1764 | |||
1765 | /* ******************** IKGLP ********************** */ | 922 | /* ******************** IKGLP ********************** */ |
1766 | 923 | ||
1767 | |||
1768 | typedef struct ikglp_heap_node | ||
1769 | { | ||
1770 | struct task_struct *task; | ||
1771 | struct binheap_node node; | ||
1772 | } ikglp_heap_node_t; | ||
1773 | |||
1774 | static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
1775 | { | ||
1776 | ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); | ||
1777 | ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); | ||
1778 | |||
1779 | BUG_ON(!d_a); | ||
1780 | BUG_ON(!d_b); | ||
1781 | |||
1782 | return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); | ||
1783 | } | ||
1784 | |||
1785 | static int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
1786 | { | ||
1787 | ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); | ||
1788 | ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); | ||
1789 | |||
1790 | return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); | ||
1791 | } | ||
1792 | |||
1793 | struct fifo_queue; | ||
1794 | struct ikglp_wait_state; | ||
1795 | |||
1796 | typedef struct ikglp_donee_heap_node | ||
1797 | { | ||
1798 | struct task_struct *task; | ||
1799 | struct fifo_queue *fq; | ||
1800 | struct ikglp_wait_state *donor_info; // cross-linked with ikglp_wait_state_t of donor | ||
1801 | |||
1802 | struct binheap_node node; | ||
1803 | } ikglp_donee_heap_node_t; | ||
1804 | |||
1805 | |||
1806 | // Maintains the state of a request as it goes through the IKGLP | ||
1807 | typedef struct ikglp_wait_state { | ||
1808 | struct task_struct *task; // pointer back to the requesting task | ||
1809 | |||
1810 | // Data for while waiting in FIFO Queue | ||
1811 | wait_queue_t fq_node; | ||
1812 | ikglp_heap_node_t global_heap_node; | ||
1813 | ikglp_donee_heap_node_t donee_heap_node; | ||
1814 | |||
1815 | // Data for while waiting in PQ | ||
1816 | ikglp_heap_node_t pq_node; | ||
1817 | |||
1818 | // Data for while waiting as a donor | ||
1819 | ikglp_donee_heap_node_t *donee_info; // cross-linked with donee's ikglp_donee_heap_node_t | ||
1820 | struct nested_info prio_donation; | ||
1821 | struct binheap_node node; | ||
1822 | } ikglp_wait_state_t; | ||
1823 | |||
1824 | |||
1825 | static int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
1826 | { | ||
1827 | ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node); | ||
1828 | ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node); | ||
1829 | |||
1830 | return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); | ||
1831 | } | ||
1832 | |||
1833 | |||
1834 | static int ikglp_edf_min_heap_donee_order(struct binheap_node *a, struct binheap_node *b) | ||
1835 | { | ||
1836 | struct task_struct *prio_a, *prio_b; | ||
1837 | |||
1838 | ikglp_donee_heap_node_t *d_a = binheap_entry(a, ikglp_donee_heap_node_t, node); | ||
1839 | ikglp_donee_heap_node_t *d_b = binheap_entry(b, ikglp_donee_heap_node_t, node); | ||
1840 | |||
1841 | if(!d_a->donor_info) { | ||
1842 | prio_a = d_a->task; | ||
1843 | } | ||
1844 | else { | ||
1845 | prio_a = d_a->donor_info->task; | ||
1846 | BUG_ON(d_a->task != d_a->donor_info->donee_info->task); | ||
1847 | } | ||
1848 | |||
1849 | if(!d_b->donor_info) { | ||
1850 | prio_b = d_b->task; | ||
1851 | } | ||
1852 | else { | ||
1853 | prio_b = d_b->donor_info->task; | ||
1854 | BUG_ON(d_b->task != d_b->donor_info->donee_info->task); | ||
1855 | } | ||
1856 | |||
1857 | // note reversed order | ||
1858 | return __edf_higher_prio(prio_b, BASE, prio_a, BASE); | ||
1859 | } | ||
1860 | |||
1861 | |||
1862 | /* struct for semaphore with priority inheritance */ | ||
1863 | struct fifo_queue | ||
1864 | { | ||
1865 | wait_queue_head_t wait; | ||
1866 | struct task_struct* owner; | ||
1867 | |||
1868 | // used for bookkeeping | ||
1869 | ikglp_heap_node_t global_heap_node; | ||
1870 | ikglp_donee_heap_node_t donee_heap_node; | ||
1871 | |||
1872 | struct task_struct* hp_waiter; | ||
1873 | int count; /* number of waiters + holder */ | ||
1874 | |||
1875 | struct nested_info nest; | ||
1876 | }; | ||
1877 | |||
1878 | |||
1879 | struct ikglp_semaphore | ||
1880 | { | ||
1881 | struct litmus_lock litmus_lock; | ||
1882 | |||
1883 | raw_spinlock_t lock; | ||
1884 | raw_spinlock_t real_lock; | ||
1885 | |||
1886 | int nr_replicas; // AKA k | ||
1887 | int m; | ||
1888 | |||
1889 | int max_fifo_len; // max len of a fifo queue | ||
1890 | |||
1891 | struct binheap_handle top_m; // min heap, base prio | ||
1892 | int top_m_size; // number of nodes in top_m | ||
1893 | |||
1894 | struct binheap_handle not_top_m; // max heap, base prio | ||
1895 | |||
1896 | struct binheap_handle donees; // min-heap, base prio | ||
1897 | struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue | ||
1898 | |||
1899 | /* data structures for holding requests */ | ||
1900 | struct fifo_queue *fifo_queues; // array nr_replicas in length | ||
1901 | struct binheap_handle priority_queue; // max-heap, base prio | ||
1902 | struct binheap_handle donors; // max-heap, base prio | ||
1903 | }; | ||
1904 | |||
1905 | static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock) | ||
1906 | { | ||
1907 | return container_of(lock, struct ikglp_semaphore, litmus_lock); | ||
1908 | } | ||
1909 | |||
1910 | |||
1911 | static inline int ikglp_get_idx(struct ikglp_semaphore *sem, | ||
1912 | struct fifo_queue *queue) | ||
1913 | { | ||
1914 | return (queue - &sem->fifo_queues[0]); | ||
1915 | } | ||
1916 | |||
1917 | static inline struct fifo_queue* ikglp_get_queue( | ||
1918 | struct ikglp_semaphore *sem, | ||
1919 | struct task_struct *holder) | ||
1920 | { | ||
1921 | int i; | ||
1922 | for(i = 0; i < sem->nr_replicas; ++i) | ||
1923 | if(sem->fifo_queues[i].owner == holder) | ||
1924 | return(&sem->fifo_queues[i]); | ||
1925 | return(NULL); | ||
1926 | } | ||
1927 | |||
1928 | static struct task_struct* ikglp_find_hp_waiter( | ||
1929 | struct fifo_queue *kqueue, | ||
1930 | struct task_struct *skip) | ||
1931 | { | ||
1932 | struct list_head *pos; | ||
1933 | struct task_struct *queued, *found = NULL; | ||
1934 | |||
1935 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1936 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, task_list)->private; | ||
1937 | |||
1938 | /* Compare task prios, find high prio task. */ | ||
1939 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1940 | found = queued; | ||
1941 | } | ||
1942 | return found; | ||
1943 | } | ||
1944 | |||
1945 | static struct fifo_queue* ikglp_find_shortest( | ||
1946 | struct ikglp_semaphore *sem, | ||
1947 | struct fifo_queue *search_start) | ||
1948 | { | ||
1949 | // we start our search at search_start instead of at the beginning of the | ||
1950 | // queue list to load-balance across all resources. | ||
1951 | struct fifo_queue* step = search_start; | ||
1952 | struct fifo_queue* shortest = sem->shortest_fifo_queue; | ||
1953 | |||
1954 | do { | ||
1955 | step = (step+1 != &sem->fifo_queues[sem->nr_replicas]) ? | ||
1956 | step+1 : &sem->fifo_queues[0]; | ||
1957 | |||
1958 | if(step->count < shortest->count) { | ||
1959 | shortest = step; | ||
1960 | if(step->count == 0) | ||
1961 | break; /* can't get any shorter */ | ||
1962 | } | ||
1963 | |||
1964 | }while(step != search_start); | ||
1965 | |||
1966 | return(shortest); | ||
1967 | } | ||
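ikglp_find_shortest() deliberately begins its scan one slot past search_start rather than at queue 0, so successive requests are spread across replicas instead of always piling onto the lowest-numbered queue, and it bails out early as soon as it sees an empty queue. A standalone sketch of the same wrap-around scan, assuming plain int counters in place of the fifo_queue structures (illustrative only, not kernel code):

#include <stdio.h>

/* Standalone version of the wrap-around scan: 'start' is where the previous
 * enqueue happened and 'cur_shortest' is the queue currently believed shortest. */
static int find_shortest(const int *count, int n, int start, int cur_shortest)
{
    int step = start;
    int shortest = cur_shortest;

    do {
        step = (step + 1 == n) ? 0 : step + 1;   /* wrap around the array */
        if (count[step] < count[shortest]) {
            shortest = step;
            if (count[step] == 0)
                break;                           /* cannot get any shorter */
        }
    } while (step != start);

    return shortest;
}

int main(void)
{
    int counts[4] = { 2, 1, 3, 1 };

    /* Queues 1 and 3 are tied at length 1; which one is picked depends on
     * where the scan starts, which is the intended load-balancing effect. */
    printf("%d\n", find_shortest(counts, 4, 0, 0));  /* 1 */
    printf("%d\n", find_shortest(counts, 4, 2, 0));  /* 3 */
    return 0;
}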
1968 | |||
1969 | static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem) | ||
1970 | { | ||
1971 | return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task; | ||
1972 | } | ||
1973 | |||
1974 | void print_global_list(struct binheap_node* n, int depth) | ||
1975 | { | ||
1976 | ikglp_heap_node_t *global_heap_node; | ||
1977 | char padding[81] = " "; | ||
1978 | |||
1979 | if(n == NULL) { | ||
1980 | TRACE_CUR("+-> %p\n", NULL); | ||
1981 | return; | ||
1982 | } | ||
1983 | |||
1984 | global_heap_node = binheap_entry(n, ikglp_heap_node_t, node); | ||
1985 | |||
1986 | if(depth*2 <= 80) | ||
1987 | padding[depth*2] = '\0'; | ||
1988 | |||
1989 | TRACE_CUR("%s+-> %s/%d\n", | ||
1990 | padding, | ||
1991 | global_heap_node->task->comm, | ||
1992 | global_heap_node->task->pid); | ||
1993 | |||
1994 | if(n->left) print_global_list(n->left, depth+1); | ||
1995 | if(n->right) print_global_list(n->right, depth+1); | ||
1996 | } | ||
1997 | |||
1998 | |||
1999 | static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node) | ||
2000 | { | ||
2001 | |||
2002 | |||
2003 | node->task = t; | ||
2004 | INIT_BINHEAP_NODE(&node->node); | ||
2005 | |||
2006 | if(sem->top_m_size < sem->m) { | ||
2007 | TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", t->comm, t->pid); | ||
2008 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
2009 | // print_global_list(sem->top_m.root, 1); | ||
2010 | |||
2011 | binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); | ||
2012 | ++(sem->top_m_size); | ||
2013 | |||
2014 | TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
2015 | print_global_list(sem->top_m.root, 1); | ||
2016 | } | ||
2017 | else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { | ||
2018 | ikglp_heap_node_t *evicted = binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node); | ||
2019 | |||
2020 | TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n", | ||
2021 | t->comm, t->pid, | ||
2022 | evicted->task->comm, evicted->task->pid); | ||
2023 | |||
2024 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2025 | // print_global_list(sem->not_top_m.root, 1); | ||
2026 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
2027 | // print_global_list(sem->top_m.root, 1); | ||
2028 | |||
2029 | |||
2030 | binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node); | ||
2031 | INIT_BINHEAP_NODE(&evicted->node); | ||
2032 | binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node); | ||
2033 | |||
2034 | binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); | ||
2035 | |||
2036 | TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
2037 | print_global_list(sem->top_m.root, 1); | ||
2038 | TRACE_CUR("Not-Top-M After:\n"); | ||
2039 | print_global_list(sem->not_top_m.root, 1); | ||
2040 | } | ||
2041 | else { | ||
2042 | TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", t->comm, t->pid); | ||
2043 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2044 | // print_global_list(sem->not_top_m.root, 1); | ||
2045 | |||
2046 | binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node); | ||
2047 | |||
2048 | TRACE_CUR("Not-Top-M After:\n"); | ||
2049 | print_global_list(sem->not_top_m.root, 1); | ||
2050 | } | ||
2051 | } | ||
2052 | |||
2053 | |||
2054 | static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node) | ||
2055 | { | ||
2056 | BUG_ON(!binheap_is_in_heap(&node->node)); | ||
2057 | |||
2058 | TRACE_CUR("Removing %s/%d from global list.\n", t->comm, t->pid); | ||
2059 | |||
2060 | if(binheap_is_in_this_heap(&node->node, &sem->top_m)) { | ||
2061 | TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid); | ||
2062 | |||
2063 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2064 | // print_global_list(sem->not_top_m.root, 1); | ||
2065 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
2066 | // print_global_list(sem->top_m.root, 1); | ||
2067 | |||
2068 | |||
2069 | binheap_delete(&node->node, &sem->top_m); | ||
2070 | |||
2071 | if(!binheap_empty(&sem->not_top_m)) { | ||
2072 | ikglp_heap_node_t *promoted = binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node); | ||
2073 | |||
2074 | TRACE_CUR("Promoting %s/%d to top-m\n", promoted->task->comm, promoted->task->pid); | ||
2075 | |||
2076 | binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node); | ||
2077 | INIT_BINHEAP_NODE(&promoted->node); | ||
2078 | |||
2079 | binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node); | ||
2080 | } | ||
2081 | else { | ||
2082 | TRACE_CUR("No one to promote to top-m.\n"); | ||
2083 | --(sem->top_m_size); | ||
2084 | } | ||
2085 | |||
2086 | TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
2087 | print_global_list(sem->top_m.root, 1); | ||
2088 | TRACE_CUR("Not-Top-M After:\n"); | ||
2089 | print_global_list(sem->not_top_m.root, 1); | ||
2090 | } | ||
2091 | else { | ||
2092 | // TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid); | ||
2093 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2094 | // print_global_list(sem->not_top_m.root, 1); | ||
2095 | |||
2096 | binheap_delete(&node->node, &sem->not_top_m); | ||
2097 | |||
2098 | TRACE_CUR("Not-Top-M After:\n"); | ||
2099 | print_global_list(sem->not_top_m.root, 1); | ||
2100 | } | ||
2101 | } | ||
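ikglp_add_global_list() and ikglp_del_global_list() together maintain one invariant: top_m always holds the m highest-base-priority outstanding requests with the weakest of them at its root (so eviction is a single comparison), while not_top_m holds everyone else with its strongest member at its root (so promotion is a single pop). The toy model below shows just that invariant with integer priorities and linear scans standing in for the binheaps; it only covers removal of a top-m member, and none of it is kernel code:

#include <stdio.h>

#define M 3   /* size of the top-m set */

/* Toy model of the global-list bookkeeping: larger int = higher base
 * priority.  'top' plays the role of sem->top_m, 'rest' of sem->not_top_m. */
static int top[M], top_n;
static int rest[16], rest_n;

static int weakest_top_idx(void)        /* root of the top-m min-heap */
{
    int i, w = 0;
    for (i = 1; i < top_n; i++)
        if (top[i] < top[w])
            w = i;
    return w;
}

static int strongest_rest_idx(void)     /* root of the not-top-m max-heap */
{
    int i, s = 0;
    for (i = 1; i < rest_n; i++)
        if (rest[i] > rest[s])
            s = i;
    return s;
}

static void add_global(int prio)
{
    if (top_n < M) {                    /* trivially in the top-m */
        top[top_n++] = prio;
    } else {
        int w = weakest_top_idx();
        if (prio > top[w]) {            /* evict the weakest top-m entry */
            rest[rest_n++] = top[w];
            top[w] = prio;
        } else {                        /* trivially not in the top-m */
            rest[rest_n++] = prio;
        }
    }
}

static void del_global(int prio)        /* removal of a top-m member only */
{
    int i;
    for (i = 0; i < top_n && top[i] != prio; i++)
        ;
    if (i == top_n)
        return;
    top[i] = top[--top_n];              /* drop it from the top-m set ... */
    if (rest_n) {                       /* ... and promote the strongest waiter */
        int s = strongest_rest_idx();
        top[top_n++] = rest[s];
        rest[s] = rest[--rest_n];
    }
}

int main(void)
{
    int prios[] = { 5, 9, 1, 7, 3 };
    int i;

    for (i = 0; i < 5; i++)
        add_global(prios[i]);           /* top-m ends up as {5, 9, 7} */

    del_global(9);                      /* 9 leaves; 3 is promoted from not-top-m */

    for (i = 0; i < top_n; i++)
        printf("%d ", top[i]);          /* prints: 5 7 3 */
    printf("\n");
    return 0;
}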
2102 | |||
2103 | |||
2104 | void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth) | ||
2105 | { | ||
2106 | ikglp_donee_heap_node_t *donee_node; | ||
2107 | char padding[81] = " "; | ||
2108 | struct task_struct* donor = NULL; | ||
2109 | |||
2110 | if(n == NULL) { | ||
2111 | TRACE_CUR("+-> %p\n", NULL); | ||
2112 | return; | ||
2113 | } | ||
2114 | |||
2115 | donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node); | ||
2116 | |||
2117 | if(depth*2 <= 80) | ||
2118 | padding[depth*2] = '\0'; | ||
2119 | |||
2120 | if(donee_node->donor_info) { | ||
2121 | donor = donee_node->donor_info->task; | ||
2122 | } | ||
2123 | |||
2124 | TRACE_CUR("%s+-> %s/%d (d: %s/%d) (fq: %d)\n", | ||
2125 | padding, | ||
2126 | donee_node->task->comm, | ||
2127 | donee_node->task->pid, | ||
2128 | (donor) ? donor->comm : "nil", | ||
2129 | (donor) ? donor->pid : -1, | ||
2130 | ikglp_get_idx(sem, donee_node->fq)); | ||
2131 | |||
2132 | if(n->left) print_donees(sem, n->left, depth+1); | ||
2133 | if(n->right) print_donees(sem, n->right, depth+1); | ||
2134 | } | ||
2135 | |||
2136 | |||
2137 | static void ikglp_add_donees(struct ikglp_semaphore *sem, struct fifo_queue *fq, struct task_struct *t, ikglp_donee_heap_node_t* node) | ||
2138 | { | ||
2139 | // TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid); | ||
2140 | // TRACE_CUR("donees Before:\n"); | ||
2141 | // print_donees(sem, sem->donees.root, 1); | ||
2142 | |||
2143 | node->task = t; | ||
2144 | node->donor_info = NULL; | ||
2145 | node->fq = fq; | ||
2146 | INIT_BINHEAP_NODE(&node->node); | ||
2147 | |||
2148 | binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node); | ||
2149 | |||
2150 | TRACE_CUR("donees After:\n"); | ||
2151 | print_donees(sem, sem->donees.root, 1); | ||
2152 | } | ||
2153 | |||
2154 | |||
2155 | static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2156 | { | ||
2157 | // priority of 't' has increased (note: 't' might already be hp_waiter). | ||
2158 | if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { | ||
2159 | struct task_struct *old_max_eff_prio; | ||
2160 | struct task_struct *new_max_eff_prio; | ||
2161 | struct task_struct *new_prio = NULL; | ||
2162 | struct task_struct *owner = fq->owner; | ||
2163 | |||
2164 | if(fq->hp_waiter) | ||
2165 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
2166 | else | ||
2167 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
2168 | |||
2169 | if(owner) | ||
2170 | { | ||
2171 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2172 | |||
2173 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
2174 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
2175 | |||
2176 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2177 | |||
2178 | fq->hp_waiter = t; | ||
2179 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
2180 | |||
2181 | binheap_decrease(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
2182 | |||
2183 | //TRACE_TASK(owner, "Heap After:\n"); | ||
2184 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
2185 | |||
2186 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2187 | |||
2188 | if(new_max_eff_prio != old_max_eff_prio) { | ||
2189 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
2190 | |||
2191 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
2192 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
2193 | new_prio = new_max_eff_prio; | ||
2194 | } | ||
2195 | } | ||
2196 | else { | ||
2197 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
2198 | } | ||
2199 | |||
2200 | if(new_prio) { | ||
2201 | // set new inheritance and propagate | ||
2202 | TRACE_TASK(t, "Effective priority changed for owner %s/%d to %s/%d\n", | ||
2203 | owner->comm, owner->pid, | ||
2204 | new_prio->comm, new_prio->pid); | ||
2205 | nested_increase_priority_inheritance(owner, new_prio, &sem->lock, flags); // unlocks lock. | ||
2206 | } | ||
2207 | else { | ||
2208 | TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", | ||
2209 | new_max_eff_prio->comm, new_max_eff_prio->pid); | ||
2210 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2211 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2212 | } | ||
2213 | } | ||
2214 | else { | ||
2215 | fq->hp_waiter = t; | ||
2216 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
2217 | |||
2218 | TRACE_TASK(t, "no owner??\n"); | ||
2219 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2220 | } | ||
2221 | } | ||
2222 | else { | ||
2223 | TRACE_TASK(t, "hp_waiter is unaffected.\n"); | ||
2224 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2225 | } | ||
2226 | } | ||
2227 | |||
2228 | // hp_waiter has decreased | ||
2229 | static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2230 | { | ||
2231 | struct task_struct *owner = fq->owner; | ||
2232 | |||
2233 | struct task_struct *old_max_eff_prio; | ||
2234 | struct task_struct *new_max_eff_prio; | ||
2235 | |||
2236 | if(!owner) { | ||
2237 | TRACE_CUR("No owner. Returning.\n"); | ||
2238 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2239 | return; | ||
2240 | } | ||
2241 | |||
2242 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2243 | |||
2244 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2245 | |||
2246 | binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
2247 | fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL; | ||
2248 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
2249 | |||
2250 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2251 | |||
2252 | if((old_max_eff_prio != new_max_eff_prio) && | ||
2253 | (effective_priority(owner) == old_max_eff_prio)) | ||
2254 | { | ||
2255 | // Need to set new effective_priority for owner | ||
2256 | struct task_struct *decreased_prio; | ||
2257 | |||
2258 | TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2259 | |||
2260 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
2261 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n", | ||
2262 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
2263 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
2264 | owner->comm, | ||
2265 | owner->pid, | ||
2266 | ikglp_get_idx(sem, fq)); | ||
2267 | |||
2268 | decreased_prio = new_max_eff_prio; | ||
2269 | } | ||
2270 | else { | ||
2271 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of fq %d.\n", | ||
2272 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
2273 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
2274 | owner->comm, | ||
2275 | owner->pid, | ||
2276 | ikglp_get_idx(sem, fq)); | ||
2277 | |||
2278 | decreased_prio = NULL; | ||
2279 | } | ||
2280 | |||
2281 | // beware: recursion | ||
2282 | nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock | ||
2283 | } | ||
2284 | else { | ||
2285 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | ||
2286 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2287 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2288 | } | ||
2289 | } | ||
2290 | |||
2291 | |||
2292 | static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2293 | { | ||
2294 | struct task_struct *owner = fq->owner; | ||
2295 | |||
2296 | struct task_struct *old_max_eff_prio; | ||
2297 | struct task_struct *new_max_eff_prio; | ||
2298 | |||
2299 | BUG_ON(!owner); | ||
2300 | |||
2301 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2302 | |||
2303 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2304 | |||
2305 | binheap_delete(n, &tsk_rt(owner)->hp_blocked_tasks); | ||
2306 | |||
2307 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2308 | |||
2309 | if((old_max_eff_prio != new_max_eff_prio) && | ||
2310 | (effective_priority(owner) == old_max_eff_prio)) | ||
2311 | { | ||
2312 | // Need to set new effective_priority for owner | ||
2313 | struct task_struct *decreased_prio; | ||
2314 | |||
2315 | TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2316 | |||
2317 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
2318 | TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2319 | decreased_prio = new_max_eff_prio; | ||
2320 | } | ||
2321 | else { | ||
2322 | TRACE_CUR("has lesser base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2323 | decreased_prio = NULL; | ||
2324 | } | ||
2325 | |||
2326 | // beware: recursion | ||
2327 | nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock | ||
2328 | } | ||
2329 | else { | ||
2330 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | ||
2331 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2332 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2333 | } | ||
2334 | } | ||
2335 | |||
2336 | static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, struct binheap_node *n) | ||
2337 | { | ||
2338 | struct task_struct *old_max_eff_prio; | ||
2339 | struct task_struct *new_max_eff_prio; | ||
2340 | |||
2341 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2342 | |||
2343 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
2344 | |||
2345 | binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks); | ||
2346 | |||
2347 | new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
2348 | |||
2349 | if((old_max_eff_prio != new_max_eff_prio) && | ||
2350 | (effective_priority(t) == old_max_eff_prio)) | ||
2351 | { | ||
2352 | // Need to set new effective_priority for owner | ||
2353 | struct task_struct *decreased_prio; | ||
2354 | |||
2355 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { | ||
2356 | decreased_prio = new_max_eff_prio; | ||
2357 | } | ||
2358 | else { | ||
2359 | decreased_prio = NULL; | ||
2360 | } | ||
2361 | |||
2362 | tsk_rt(t)->inh_task = decreased_prio; | ||
2363 | } | ||
2364 | |||
2365 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2366 | } | ||
2367 | |||
2368 | static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2369 | { | ||
2370 | // resource available now | ||
2371 | TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq)); | ||
2372 | |||
2373 | fq->owner = t; | ||
2374 | |||
2375 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2376 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
2377 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2378 | |||
2379 | ++(fq->count); | ||
2380 | |||
2381 | ikglp_add_global_list(sem, t, &fq->global_heap_node); | ||
2382 | ikglp_add_donees(sem, fq, t, &fq->donee_heap_node); | ||
2383 | |||
2384 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); | ||
2385 | |||
2386 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2387 | } | ||
2388 | |||
2389 | |||
2390 | |||
2391 | static void __ikglp_enqueue_on_fq( | ||
2392 | struct ikglp_semaphore *sem, | ||
2393 | struct fifo_queue* fq, | ||
2394 | struct task_struct* t, | ||
2395 | wait_queue_t *wait, | ||
2396 | ikglp_heap_node_t *global_heap_node, | ||
2397 | ikglp_donee_heap_node_t *donee_heap_node) | ||
2398 | { | ||
2399 | /* resource is not free => must suspend and wait */ | ||
2400 | TRACE_TASK(t, "Enqueuing on fq %d.\n", | ||
2401 | ikglp_get_idx(sem, fq)); | ||
2402 | |||
2403 | init_waitqueue_entry(wait, t); | ||
2404 | |||
2405 | __add_wait_queue_tail_exclusive(&fq->wait, wait); | ||
2406 | |||
2407 | ++(fq->count); | ||
2408 | |||
2409 | // update global list. | ||
2410 | if(likely(global_heap_node)) { | ||
2411 | if(binheap_is_in_heap(&global_heap_node->node)) { | ||
2412 | WARN_ON(1); | ||
2413 | ikglp_del_global_list(sem, t, global_heap_node); | ||
2414 | } | ||
2415 | ikglp_add_global_list(sem, t, global_heap_node); | ||
2416 | } | ||
2417 | // update donor eligibility list. | ||
2418 | if(likely(donee_heap_node)) { | ||
2419 | // if(binheap_is_in_heap(&donee_heap_node->node)) { | ||
2420 | // WARN_ON(1); | ||
2421 | // } | ||
2422 | ikglp_add_donees(sem, fq, t, donee_heap_node); | ||
2423 | } | ||
2424 | |||
2425 | if(likely(sem->shortest_fifo_queue == fq)) { | ||
2426 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq); | ||
2427 | } | ||
2428 | |||
2429 | TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, fq)); | ||
2430 | } | ||
2431 | |||
2432 | |||
2433 | static void ikglp_enqueue_on_fq( | ||
2434 | struct ikglp_semaphore *sem, | ||
2435 | struct fifo_queue *fq, | ||
2436 | ikglp_wait_state_t *wait, | ||
2437 | unsigned long flags) | ||
2438 | { | ||
2439 | /* resource is not free => must suspend and wait */ | ||
2440 | TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n", | ||
2441 | ikglp_get_idx(sem, fq)); | ||
2442 | |||
2443 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
2444 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
2445 | |||
2446 | __ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, &wait->global_heap_node, &wait->donee_heap_node); | ||
2447 | |||
2448 | ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags); // unlocks sem->lock | ||
2449 | } | ||
2450 | |||
2451 | |||
2452 | static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait) | ||
2453 | { | ||
2454 | TRACE_TASK(wait->task, "goes to PQ.\n"); | ||
2455 | |||
2456 | wait->pq_node.task = wait->task; // copy over task (little redundant...) | ||
2457 | |||
2458 | binheap_add(&wait->pq_node.node, &sem->priority_queue, ikglp_heap_node_t, node); | ||
2459 | } | ||
2460 | |||
2461 | static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait) | ||
2462 | { | ||
2463 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
2464 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
2465 | INIT_BINHEAP_NODE(&wait->pq_node.node); | ||
2466 | |||
2467 | __ikglp_enqueue_on_pq(sem, wait); | ||
2468 | } | ||
2469 | |||
2470 | void print_donors(struct binheap_node *n, int depth) | ||
2471 | { | ||
2472 | ikglp_wait_state_t *donor_node; | ||
2473 | char padding[81] = " "; | ||
2474 | |||
2475 | if(n == NULL) { | ||
2476 | TRACE_CUR("+-> %p\n", NULL); | ||
2477 | return; | ||
2478 | } | ||
2479 | |||
2480 | donor_node = binheap_entry(n, ikglp_wait_state_t, node); | ||
2481 | |||
2482 | if(depth*2 <= 80) | ||
2483 | padding[depth*2] = '\0'; | ||
2484 | |||
2485 | |||
2486 | TRACE_CUR("%s+-> %s/%d (donee: %s/%d)\n", | ||
2487 | padding, | ||
2488 | donor_node->task->comm, | ||
2489 | donor_node->task->pid, | ||
2490 | donor_node->donee_info->task->comm, | ||
2491 | donor_node->donee_info->task->pid); | ||
2492 | |||
2493 | if(n->left) print_donors(n->left, depth+1); | ||
2494 | if(n->right) print_donors(n->right, depth+1); | ||
2495 | } | ||
2496 | |||
2497 | |||
2498 | static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state_t* wait, unsigned long flags) | ||
2499 | { | ||
2500 | struct task_struct *t = wait->task; | ||
2501 | ikglp_donee_heap_node_t *donee_node = NULL; | ||
2502 | struct task_struct *donee; | ||
2503 | |||
2504 | struct task_struct *old_max_eff_prio; | ||
2505 | struct task_struct *new_max_eff_prio; | ||
2506 | struct task_struct *new_prio = NULL; | ||
2507 | |||
2508 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
2509 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
2510 | INIT_BINHEAP_NODE(&wait->pq_node.node); | ||
2511 | INIT_BINHEAP_NODE(&wait->node); | ||
2512 | |||
2513 | // TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid); | ||
2514 | // TRACE_CUR("donors Before:\n"); | ||
2515 | // print_donors(sem->donors.root, 1); | ||
2516 | |||
2517 | // Add donor to the global list. | ||
2518 | ikglp_add_global_list(sem, t, &wait->global_heap_node); | ||
2519 | |||
2520 | // Select a donee | ||
2521 | donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node); | ||
2522 | donee = donee_node->task; | ||
2523 | |||
2524 | TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid); | ||
2525 | |||
2526 | TRACE_CUR("Temporarily removing %s/%d to donee list.\n", donee->comm, donee->pid); | ||
2527 | // TRACE_CUR("donees Before:\n"); | ||
2528 | // print_donees(sem, sem->donees.root, 1); | ||
2529 | |||
2530 | binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node); // will re-add it shortly | ||
2531 | |||
2532 | TRACE_CUR("donees After:\n"); | ||
2533 | print_donees(sem, sem->donees.root, 1); | ||
2534 | |||
2535 | |||
2536 | wait->donee_info = donee_node; | ||
2537 | |||
2538 | // Add t to donor heap. | ||
2539 | binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node); | ||
2540 | |||
2541 | // Now adjust the donee's priority. | ||
2542 | |||
2543 | // Lock the donee's inheritance heap. | ||
2544 | raw_spin_lock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
2545 | |||
2546 | old_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); | ||
2547 | |||
2548 | if(donee_node->donor_info) { | ||
2549 | // Steal donation relation. Evict old donor to PQ. | ||
2550 | |||
2551 | // Remove old donor from donor heap | ||
2552 | ikglp_wait_state_t *old_wait = donee_node->donor_info; | ||
2553 | struct task_struct *old_donor = old_wait->task; | ||
2554 | |||
2555 | TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d. Moving old donor to PQ.\n", | ||
2556 | donee->comm, donee->pid, old_donor->comm, old_donor->pid); | ||
2557 | |||
2558 | binheap_delete(&old_wait->node, &sem->donors); | ||
2559 | |||
2560 | // Remove donation from donee's inheritance heap. | ||
2561 | binheap_delete(&old_wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks); | ||
2562 | // WARNING: have not updated inh_prio! | ||
2563 | |||
2564 | // Add old donor to PQ. | ||
2565 | __ikglp_enqueue_on_pq(sem, old_wait); | ||
2566 | |||
2567 | // Remove old donor from the global heap. | ||
2568 | ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node); | ||
2569 | } | ||
2570 | |||
2571 | // Add back donee's node to the donees heap with increased prio | ||
2572 | donee_node->donor_info = wait; | ||
2573 | INIT_BINHEAP_NODE(&donee_node->node); | ||
2574 | |||
2575 | |||
2576 | TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid); | ||
2577 | // TRACE_CUR("donees Before:\n"); | ||
2578 | print_donees(sem, sem->donees.root, 1); | ||
2579 | |||
2580 | binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node); | ||
2581 | |||
2582 | TRACE_CUR("donees After:\n"); | ||
2583 | print_donees(sem, sem->donees.root, 1); | ||
2584 | |||
2585 | |||
2586 | |||
2587 | // Add an inheritance/donation to the donee's inheritance heap. | ||
2588 | wait->prio_donation.lock = (struct litmus_lock*)sem; | ||
2589 | wait->prio_donation.hp_waiter_eff_prio = t; | ||
2590 | wait->prio_donation.hp_waiter_ptr = NULL; | ||
2591 | INIT_BINHEAP_NODE(&wait->prio_donation.hp_binheap_node); | ||
2592 | |||
2593 | binheap_add(&wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
2594 | |||
2595 | new_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); | ||
2596 | |||
2597 | if(new_max_eff_prio != old_max_eff_prio) { | ||
2598 | if ((effective_priority(donee) == old_max_eff_prio) || | ||
2599 | (__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ | ||
2600 | TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", donee->comm, donee->pid); | ||
2601 | new_prio = new_max_eff_prio; | ||
2602 | } | ||
2603 | // else { | ||
2604 | // // should be bug. donor would not be in top-m. | ||
2605 | // TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid); | ||
2606 | // WARN_ON(1); | ||
2607 | // } | ||
2608 | // } | ||
2609 | // else { | ||
2610 | // // should be bug. donor would not be in top-m. | ||
2611 | // TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid); | ||
2612 | // WARN_ON(1); | ||
2613 | } | ||
2614 | |||
2615 | if(new_prio) { | ||
2616 | struct fifo_queue *donee_fq = donee_node->fq; | ||
2617 | |||
2618 | if(donee != donee_fq->owner) { | ||
2619 | TRACE_TASK(t, "%s/%d is not the owner. Propagating priority to owner %s/%d.\n", | ||
2620 | donee->comm, donee->pid, | ||
2621 | donee_fq->owner->comm, donee_fq->owner->pid); | ||
2622 | |||
2623 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
2624 | ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags); // unlocks sem->lock | ||
2625 | } | ||
2626 | else { | ||
2627 | TRACE_TASK(t, "%s/%d is the owner. Propagating priority immediately.\n", | ||
2628 | donee->comm, donee->pid); | ||
2629 | nested_increase_priority_inheritance(donee, new_prio, &sem->lock, flags); // unlocks sem->lock and donee's heap lock | ||
2630 | } | ||
2631 | } | ||
2632 | else { | ||
2633 | TRACE_TASK(t, "No change in effective priority (it is %d/%s). BUG?\n", | ||
2634 | new_max_eff_prio->comm, new_max_eff_prio->pid); | ||
2635 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
2636 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2637 | } | ||
2638 | |||
2639 | |||
2640 | TRACE_CUR("donors After:\n"); | ||
2641 | print_donors(sem->donors.root, 1); | ||
2642 | } | ||
2643 | |||
2644 | |||
2645 | static int gsnedf_ikglp_lock(struct litmus_lock* l) | ||
2646 | { | ||
2647 | struct task_struct* t = current; | ||
2648 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
2649 | unsigned long flags = 0, real_flags; | ||
2650 | struct fifo_queue *fq = NULL; | ||
2651 | int replica = -EINVAL; | ||
2652 | |||
2653 | ikglp_wait_state_t wait; | ||
2654 | |||
2655 | if (!is_realtime(t)) | ||
2656 | return -EPERM; | ||
2657 | |||
2658 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | ||
2659 | |||
2660 | lock_global_irqsave(&dgl_lock, flags); | ||
2661 | lock_fine_irqsave(&sem->lock, flags); | ||
2662 | |||
2663 | if(sem->shortest_fifo_queue->count == 0) { | ||
2664 | // take available resource | ||
2665 | replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); | ||
2666 | |||
2667 | ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock | ||
2668 | |||
2669 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2670 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
2671 | } | ||
2672 | else | ||
2673 | { | ||
2674 | // we have to suspend. | ||
2675 | |||
2676 | wait.task = t; // THIS IS CRITICALLY IMPORTANT!!! | ||
2677 | |||
2678 | tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked | ||
2679 | mb(); | ||
2680 | |||
2681 | /* FIXME: interruptible would be nice some day */ | ||
2682 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
2683 | |||
2684 | if(sem->shortest_fifo_queue->count < sem->max_fifo_len) { | ||
2685 | // enqueue on fq | ||
2686 | ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock | ||
2687 | } | ||
2688 | else { | ||
2689 | |||
2690 | TRACE_CUR("IKGLP fifo queues are full.\n"); | ||
2691 | |||
2692 | // no room in fifos. Go to PQ or donors. | ||
2693 | |||
2694 | if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { | ||
2695 | // enqueue on PQ | ||
2696 | ikglp_enqueue_on_pq(sem, &wait); | ||
2697 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2698 | } | ||
2699 | else { | ||
2700 | // enqueue as donor | ||
2701 | ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock | ||
2702 | } | ||
2703 | } | ||
2704 | |||
2705 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2706 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
2707 | |||
2708 | TS_LOCK_SUSPEND; | ||
2709 | |||
2710 | schedule(); | ||
2711 | |||
2712 | TS_LOCK_RESUME; | ||
2713 | |||
2714 | fq = ikglp_get_queue(sem, t); | ||
2715 | BUG_ON(!fq); | ||
2716 | |||
2717 | replica = ikglp_get_idx(sem, fq); | ||
2718 | } | ||
2719 | |||
2720 | TRACE_CUR("Acquired lock %d, queue %d\n", | ||
2721 | l->ident, replica); | ||
2722 | |||
2723 | return replica; | ||
2724 | } | ||
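The lock path above boils down to a four-way routing decision: take a free replica immediately, join the shortest FIFO queue if it still has room, and otherwise go to the priority queue or become a donor depending on whether the request beats the m-th highest request known to the lock. A compact sketch of just that decision, assuming larger int means higher base priority (in G-EDF this would be an earlier deadline); all names are illustrative, not LITMUS API:

#include <stdio.h>

enum ikglp_route { TAKE_REPLICA, WAIT_ON_FIFO, WAIT_ON_PQ, WAIT_AS_DONOR };

/* Sketch of the routing decision in the lock path.  'mth_highest' is the
 * priority at the root of the top-m heap. */
static enum ikglp_route route_request(int shortest_len, int max_fifo_len,
                                      int my_prio, int mth_highest)
{
    if (shortest_len == 0)
        return TAKE_REPLICA;            /* a replica is free: acquire immediately */
    if (shortest_len < max_fifo_len)
        return WAIT_ON_FIFO;            /* room left in the shortest FIFO queue */
    if (mth_highest > my_prio)
        return WAIT_ON_PQ;              /* not among the top-m: park in the PQ */
    return WAIT_AS_DONOR;               /* among the top-m: donate to a donee */
}

int main(void)
{
    /* All FIFOs full (length 3 of 3): a low-priority request goes to the
     * PQ, a high-priority one becomes a donor. */
    printf("%d\n", route_request(3, 3, 10, 50));   /* 2 = WAIT_ON_PQ */
    printf("%d\n", route_request(3, 3, 90, 50));   /* 3 = WAIT_AS_DONOR */
    printf("%d\n", route_request(0, 3, 10, 50));   /* 0 = TAKE_REPLICA */
    return 0;
}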
2725 | |||
2726 | static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *donor_info) | ||
2727 | { | ||
2728 | struct task_struct *t = donor_info->task; | ||
2729 | |||
2730 | TRACE_CUR("Donor %s/%d being moved to fq %d\n", | ||
2731 | t->comm, | ||
2732 | t->pid, | ||
2733 | ikglp_get_idx(sem, fq)); | ||
2734 | |||
2735 | binheap_delete(&donor_info->node, &sem->donors); | ||
2736 | |||
2737 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
2738 | &donor_info->fq_node, | ||
2739 | NULL, // already in global_list, so pass null to prevent adding 2nd time. | ||
2740 | //&donor_info->global_heap_node, | ||
2741 | &donor_info->donee_heap_node); | ||
2742 | |||
2743 | // warning: | ||
2744 | // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. | ||
2745 | } | ||
2746 | |||
2747 | static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *wait) | ||
2748 | { | ||
2749 | struct task_struct *t = wait->task; | ||
2750 | |||
2751 | TRACE_CUR("PQ request %s/%d being moved to fq %d\n", | ||
2752 | t->comm, | ||
2753 | t->pid, | ||
2754 | ikglp_get_idx(sem, fq)); | ||
2755 | |||
2756 | binheap_delete(&wait->pq_node.node, &sem->priority_queue); | ||
2757 | |||
2758 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
2759 | &wait->fq_node, | ||
2760 | &wait->global_heap_node, | ||
2761 | &wait->donee_heap_node); | ||
2762 | // warning: | ||
2763 | // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. | ||
2764 | } | ||
2765 | |||
2766 | static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(struct ikglp_semaphore* sem) | ||
2767 | { | ||
2768 | /* must hold sem->lock */ | ||
2769 | |||
2770 | struct fifo_queue *fq = NULL; | ||
2771 | struct list_head *pos; | ||
2772 | struct task_struct *queued; | ||
2773 | int i; | ||
2774 | |||
2775 | for(i = 0; i < sem->nr_replicas; ++i) { | ||
2776 | if( (sem->fifo_queues[i].count > 1) && | ||
2777 | (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { | ||
2778 | |||
2779 | TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n", | ||
2780 | ikglp_get_idx(sem, &sem->fifo_queues[i]), | ||
2781 | sem->fifo_queues[i].hp_waiter->comm, | ||
2782 | sem->fifo_queues[i].hp_waiter->pid, | ||
2783 | (fq) ? ikglp_get_idx(sem, fq) : -1, | ||
2784 | (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "nil") : "nilXX", | ||
2785 | (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->pid : -1) : -2); | ||
2786 | |||
2787 | fq = &sem->fifo_queues[i]; | ||
2788 | |||
2789 | WARN_ON(!(fq->hp_waiter)); | ||
2790 | } | ||
2791 | } | ||
2792 | |||
2793 | if(fq) { | ||
2794 | struct task_struct *max_hp = fq->hp_waiter; | ||
2795 | ikglp_wait_state_t* ret = NULL; | ||
2796 | |||
2797 | TRACE_CUR("Searching for %s/%d on fq %d\n", | ||
2798 | max_hp->comm, | ||
2799 | max_hp->pid, | ||
2800 | ikglp_get_idx(sem, fq)); | ||
2801 | |||
2802 | BUG_ON(!max_hp); | ||
2803 | |||
2804 | list_for_each(pos, &fq->wait.task_list) { | ||
2805 | wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); | ||
2806 | |||
2807 | queued = (struct task_struct*) wait->private; | ||
2808 | |||
2809 | TRACE_CUR("fq %d entry: %s/%d\n", ikglp_get_idx(sem, fq), queued->comm, queued->pid); | ||
2810 | |||
2811 | /* Compare task prios, find high prio task. */ | ||
2812 | if (queued == max_hp) { | ||
2813 | TRACE_CUR("Found it!\n"); | ||
2814 | ret = container_of(wait, ikglp_wait_state_t, fq_node); | ||
2815 | } | ||
2816 | } | ||
2817 | |||
2818 | // list_for_each(pos, &fq->wait.task_list) { | ||
2819 | // wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); | ||
2820 | // | ||
2821 | // queued = (struct task_struct*) wait->private; | ||
2822 | // /* Compare task prios, find high prio task. */ | ||
2823 | // if (queued == max_hp) { | ||
2824 | // TRACE_CUR("Found it!\n"); | ||
2825 | // return container_of(wait, ikglp_wait_state_t, fq_node); | ||
2826 | // } | ||
2827 | // } | ||
2828 | |||
2829 | if(!ret) { | ||
2830 | WARN_ON(1); | ||
2831 | } | ||
2832 | |||
2833 | return ret; | ||
2834 | } | ||
2835 | |||
2836 | return(NULL); | ||
2837 | } | ||
2838 | |||
2839 | static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *fq_wait) | ||
2840 | { | ||
2841 | struct task_struct *t = fq_wait->task; | ||
2842 | struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq; | ||
2843 | |||
2844 | WARN_ON(t != fq_steal->hp_waiter); | ||
2845 | |||
2846 | TRACE_CUR("FQ request %s/%d being moved to fq %d\n", | ||
2847 | t->comm, | ||
2848 | t->pid, | ||
2849 | ikglp_get_idx(sem, fq)); | ||
2850 | |||
2851 | fq_wait->donee_heap_node.fq = fq; // just to be safe | ||
2852 | |||
2853 | |||
2854 | __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node); | ||
2855 | --(fq_steal->count); | ||
2856 | |||
2857 | fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL); | ||
2858 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
2859 | ikglp_get_idx(sem, fq_steal), | ||
2860 | (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "nil", | ||
2861 | (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : -1); | ||
2862 | |||
2863 | |||
2864 | // Update shortest. | ||
2865 | if(fq_steal->count < sem->shortest_fifo_queue->count) { | ||
2866 | sem->shortest_fifo_queue = fq_steal; | ||
2867 | } | ||
2868 | |||
2869 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
2870 | &fq_wait->fq_node, | ||
2871 | NULL, | ||
2872 | NULL); | ||
2873 | |||
2874 | // warning: We have not checked the priority inheritance of fq's owner yet. | ||
2875 | } | ||
2876 | |||
2877 | |||
2878 | static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *old_wait) | ||
2879 | { | ||
2880 | struct task_struct *t = old_wait->task; | ||
2881 | |||
2882 | BUG_ON(old_wait->donee_heap_node.fq != fq); | ||
2883 | |||
2884 | TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n", ikglp_get_idx(sem, fq)); | ||
2885 | |||
2886 | // Need to migrate global_heap_node and donee_heap_node off the waiter's stack | ||
2887 | // and into the nodes allocated for the owner of this fq. | ||
2888 | |||
2889 | // TODO: Enhance binheap() to perform this operation in place. | ||
2890 | |||
2891 | ikglp_del_global_list(sem, t, &old_wait->global_heap_node); // remove | ||
2892 | fq->global_heap_node = old_wait->global_heap_node; // copy | ||
2893 | ikglp_add_global_list(sem, t, &fq->global_heap_node); // re-add | ||
2894 | |||
2895 | binheap_delete(&old_wait->donee_heap_node.node, &sem->donees); // remove | ||
2896 | fq->donee_heap_node = old_wait->donee_heap_node; // copy | ||
2897 | |||
2898 | if(fq->donee_heap_node.donor_info) { | ||
2899 | // let donor know that our location has changed | ||
2900 | BUG_ON(fq->donee_heap_node.donor_info->donee_info->task != t); // validate cross-link | ||
2901 | fq->donee_heap_node.donor_info->donee_info = &fq->donee_heap_node; | ||
2902 | } | ||
2903 | INIT_BINHEAP_NODE(&fq->donee_heap_node.node); | ||
2904 | binheap_add(&fq->donee_heap_node.node, &sem->donees, ikglp_donee_heap_node_t, node); // re-add | ||
2905 | } | ||
2906 | |||
2907 | static int gsnedf_ikglp_unlock(struct litmus_lock* l) | ||
2908 | { | ||
2909 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
2910 | struct task_struct *t = current; | ||
2911 | struct task_struct *donee = NULL; | ||
2912 | struct task_struct *next = NULL; | ||
2913 | struct task_struct *new_on_fq = NULL; | ||
2914 | |||
2915 | ikglp_wait_state_t *other_donor_info = NULL; | ||
2916 | struct fifo_queue *to_steal = NULL; | ||
2917 | struct fifo_queue *fq; | ||
2918 | |||
2919 | unsigned long flags = 0, real_flags; | ||
2920 | |||
2921 | int err = 0; | ||
2922 | |||
2923 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | ||
2924 | |||
2925 | lock_global_irqsave(&dgl_lock, flags); // TODO: Push this deeper | ||
2926 | lock_fine_irqsave(&sem->lock, flags); | ||
2927 | |||
2928 | fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. | ||
2929 | |||
2930 | if (!fq) { | ||
2931 | err = -EINVAL; | ||
2932 | goto out; | ||
2933 | } | ||
2934 | |||
2935 | TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); | ||
2936 | |||
2937 | |||
2938 | // Remove 't' from the heaps, but data in nodes will still be good. | ||
2939 | ikglp_del_global_list(sem, t, &fq->global_heap_node); | ||
2940 | binheap_delete(&fq->donee_heap_node.node, &sem->donees); | ||
2941 | |||
2942 | // Move the next request into the FQ and update heaps as needed. | ||
2943 | // We defer re-evaluation of priorities to later in the function. | ||
2944 | if(fq->donee_heap_node.donor_info) { // move my donor to FQ | ||
2945 | ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; | ||
2946 | |||
2947 | new_on_fq = donor_info->task; | ||
2948 | |||
2949 | TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n", | ||
2950 | new_on_fq->comm, new_on_fq->pid, | ||
2951 | ikglp_get_idx(sem, fq)); | ||
2952 | // donor moved to FQ | ||
2953 | donee = t; | ||
2954 | ikglp_move_donor_to_fq(sem, fq, donor_info); | ||
2955 | // TODO: Terminate donation | ||
2956 | } | ||
2957 | else if(!binheap_empty(&sem->donors)) { // no donor of my own, so move the highest-priority donor to FQ | ||
2958 | // move other donor to FQ | ||
2959 | other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); | ||
2960 | |||
2961 | new_on_fq = other_donor_info->task; | ||
2962 | donee = other_donor_info->donee_info->task; | ||
2963 | |||
2964 | // update the donee's heap position. | ||
2965 | other_donor_info->donee_info->donor_info = NULL; // clear the cross-link | ||
2966 | binheap_decrease(&other_donor_info->donee_info->node, &sem->donees); | ||
2967 | |||
2968 | |||
2969 | TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n", | ||
2970 | new_on_fq->comm, new_on_fq->pid, | ||
2971 | ikglp_get_idx(sem, fq)); | ||
2972 | |||
2973 | ikglp_move_donor_to_fq(sem, fq, other_donor_info); | ||
2974 | |||
2975 | // TODO: Terminate donation | ||
2976 | } | ||
2977 | else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ | ||
2978 | ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, ikglp_heap_node_t, node); | ||
2979 | ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, pq_node); | ||
2980 | |||
2981 | new_on_fq = pq_wait->task; | ||
2982 | |||
2983 | TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n", | ||
2984 | new_on_fq->comm, new_on_fq->pid, | ||
2985 | ikglp_get_idx(sem, fq)); | ||
2986 | |||
2987 | ikglp_move_pq_to_fq(sem, fq, pq_wait); | ||
2988 | } | ||
2989 | else if(fq->count == 1) { // no PQ requests and no other waiters on this queue, so try to steal | ||
2990 | // steal. | ||
2991 | ikglp_wait_state_t *fq_wait; | ||
2992 | |||
2993 | TRACE_TASK(t, "Looking to steal a request for fq %d...\n", | ||
2994 | ikglp_get_idx(sem, fq)); | ||
2995 | |||
2996 | fq_wait = ikglp_find_hp_waiter_to_steal(sem); | ||
2997 | if(fq_wait) { | ||
2998 | to_steal = fq_wait->donee_heap_node.fq; | ||
2999 | |||
3000 | new_on_fq = fq_wait->task; | ||
3001 | |||
3002 | TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", | ||
3003 | new_on_fq->comm, new_on_fq->pid, | ||
3004 | ikglp_get_idx(sem, to_steal), | ||
3005 | ikglp_get_idx(sem, fq)); | ||
3006 | |||
3007 | ikglp_steal_to_fq(sem, fq, fq_wait); | ||
3008 | |||
3009 | // TODO: Update prio of old queue. | ||
3010 | } | ||
3011 | else { | ||
3012 | TRACE_TASK(t, "Found nothing to steal for fq %d.\n", | ||
3013 | ikglp_get_idx(sem, fq)); | ||
3014 | } | ||
3015 | } | ||
3016 | else { // move no one | ||
3017 | } | ||
3018 | |||
3019 | // 't' must drop all priority and clean up data structures before hand-off. | ||
3020 | |||
3021 | // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST | ||
3022 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
3023 | { | ||
3024 | int count = 0; | ||
3025 | while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { | ||
3026 | binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
3027 | ++count; | ||
3028 | } | ||
3029 | decrease_priority_inheritance(t, NULL); | ||
3030 | WARN_ON(count > 2); // should never exceed 2: only the local fq inheritance and one donation are possible. | ||
3031 | } | ||
3032 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
3033 | |||
3034 | |||
3035 | // Updating the owner and updating sem->shortest_fifo_queue | ||
3036 | // could have been done sooner, but it is deferred in the hope | ||
3037 | // of reducing thrashing of the sem->shortest_fifo_queue | ||
3038 | // assignment. | ||
3039 | fq->owner = NULL; // no longer owned!! | ||
3040 | --(fq->count); | ||
3041 | if(fq->count < sem->shortest_fifo_queue->count) { | ||
3042 | sem->shortest_fifo_queue = fq; | ||
3043 | } | ||
3044 | |||
3045 | // Now patch up other priorities. | ||
3046 | // | ||
3047 | // At most one of the following: | ||
3048 | // if(donee && donee != t), decrease prio, propagate to owner, or onward | ||
3049 | // if(to_steal), update owner's prio (hp_waiter has already been set) | ||
3050 | // | ||
3051 | |||
3052 | BUG_ON((other_donor_info != NULL) && (to_steal != NULL)); | ||
3053 | |||
3054 | if(other_donor_info) { | ||
3055 | struct fifo_queue *other_fq = other_donor_info->donee_info->fq; | ||
3056 | |||
3057 | BUG_ON(!donee); | ||
3058 | BUG_ON(donee == t); | ||
3059 | |||
3060 | TRACE_TASK(t, "Terminating donation relation of donor %s/%d to donee %s/%d!\n", | ||
3061 | other_donor_info->task->comm, other_donor_info->task->pid, | ||
3062 | donee->comm, donee->pid); | ||
3063 | |||
3064 | // need to terminate donation relation. | ||
3065 | if(donee == other_fq->owner) { | ||
3066 | TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", | ||
3067 | donee->comm, donee->pid, | ||
3068 | ikglp_get_idx(sem, other_fq)); | ||
3069 | |||
3070 | ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); | ||
3071 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
3072 | } | ||
3073 | else { | ||
3074 | TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n", | ||
3075 | donee->comm, donee->pid, | ||
3076 | ikglp_get_idx(sem, other_fq)); | ||
3077 | |||
3078 | ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node); | ||
3079 | if(donee == other_fq->hp_waiter) { | ||
3080 | TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n", | ||
3081 | donee->comm, donee->pid, | ||
3082 | ikglp_get_idx(sem, other_fq)); | ||
3083 | |||
3084 | other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); | ||
3085 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
3086 | ikglp_get_idx(sem, other_fq), | ||
3087 | (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "nil", | ||
3088 | (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); | ||
3089 | |||
3090 | ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. | ||
3091 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
3092 | } | ||
3093 | } | ||
3094 | } | ||
3095 | else if(to_steal) { | ||
3096 | TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n", | ||
3097 | ikglp_get_idx(sem, to_steal)); | ||
3098 | |||
3099 | ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. | ||
3100 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
3101 | } | ||
3102 | |||
3103 | // check for new HP waiter. | ||
3104 | if(new_on_fq) { | ||
3105 | // fq->owner is null, so just update the hp_waiter without locking. | ||
3106 | |||
3107 | if(new_on_fq == fq->hp_waiter) { | ||
3108 | TRACE_TASK(t, "new_on_fq is already hp_waiter.\n", fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
3109 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure... | ||
3110 | } | ||
3111 | else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { | ||
3112 | if(fq->hp_waiter) | ||
3113 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
3114 | else | ||
3115 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
3116 | |||
3117 | fq->hp_waiter = new_on_fq; | ||
3118 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
3119 | |||
3120 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
3121 | ikglp_get_idx(sem, fq), | ||
3122 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
3123 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
3124 | } | ||
3125 | } | ||
3126 | |||
3127 | |||
3128 | if(waitqueue_active(&fq->wait)) | ||
3129 | { | ||
3130 | wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); | ||
3131 | ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); | ||
3132 | next = (struct task_struct*) wait->private; | ||
3133 | |||
3134 | __remove_wait_queue(&fq->wait, wait); | ||
3135 | |||
3136 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
3137 | ikglp_get_idx(sem, fq), | ||
3138 | next->comm, next->pid); | ||
3139 | |||
3140 | // migrate wait-state to fifo-memory. | ||
3141 | ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); | ||
3142 | |||
3143 | /* next becomes the resource holder */ | ||
3144 | fq->owner = next; | ||
3145 | tsk_rt(next)->blocked_lock = NULL; | ||
3146 | |||
3147 | |||
3148 | /* determine new hp_waiter if necessary */ | ||
3149 | if (next == fq->hp_waiter) { | ||
3150 | |||
3151 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
3152 | /* next has the highest priority --- it doesn't need to | ||
3153 | * inherit. However, we need to make sure that the | ||
3154 | * next-highest priority in the queue is reflected in | ||
3155 | * hp_waiter. */ | ||
3156 | fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL); | ||
3157 | TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n", | ||
3158 | ikglp_get_idx(sem, fq), | ||
3159 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
3160 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
3161 | |||
3162 | fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL; | ||
3163 | |||
3164 | if (fq->hp_waiter) | ||
3165 | TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n"); | ||
3166 | else | ||
3167 | TRACE("no further waiters\n"); | ||
3168 | |||
3169 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3170 | |||
3171 | //TRACE_TASK(next, "Heap Before:\n"); | ||
3172 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
3173 | |||
3174 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
3175 | |||
3176 | //TRACE_TASK(next, "Heap After:\n"); | ||
3177 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
3178 | |||
3179 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3180 | } | ||
3181 | else { | ||
3182 | /* Well, if 'next' is not the highest-priority waiter, | ||
3183 | * then it (probably) ought to inherit the highest-priority | ||
3184 | * waiter's priority. */ | ||
3185 | TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", | ||
3186 | ikglp_get_idx(sem, fq), | ||
3187 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
3188 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
3189 | |||
3190 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3191 | |||
3192 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | ||
3193 | struct nested_info, hp_binheap_node); | ||
3194 | |||
3195 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
3196 | * because that update has not executed yet (the update operation is | ||
3197 | * probably blocked on mutex->lock). So only inherit if the root of | ||
3198 | * next's inheritance heap is indeed the effective priority of hp_waiter. | ||
3199 | * (We use fq->nest.hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
3200 | * since the effective priority of hp_waiter can change and that | ||
3201 | * update may not have reached this lock yet.) | ||
3202 | */ | ||
3203 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == fq->nest.hp_waiter_eff_prio)) | ||
3204 | { | ||
3205 | if(fq->nest.hp_waiter_eff_prio) | ||
3206 | increase_priority_inheritance(next, fq->nest.hp_waiter_eff_prio); | ||
3207 | else | ||
3208 | WARN_ON(1); | ||
3209 | } | ||
3210 | |||
3211 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3212 | } | ||
3213 | |||
3214 | |||
3215 | // wake up the new resource holder! | ||
3216 | wake_up_process(next); | ||
3217 | } | ||
3218 | |||
3219 | out: | ||
3220 | unlock_fine_irqrestore(&sem->lock, flags); | ||
3221 | unlock_global_irqrestore(&dgl_lock, flags); | ||
3222 | |||
3223 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
3224 | |||
3225 | return err; | ||
3226 | } | ||
3227 | |||
3228 | |||
3229 | static int gsnedf_ikglp_close(struct litmus_lock* l) | ||
3230 | { | ||
3231 | struct task_struct *t = current; | ||
3232 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
3233 | unsigned long flags; | ||
3234 | |||
3235 | int owner = 0; | ||
3236 | int i; | ||
3237 | |||
3238 | raw_spin_lock_irqsave(&sem->real_lock, flags); | ||
3239 | |||
3240 | for(i = 0; i < sem->nr_replicas; ++i) { | ||
3241 | if(sem->fifo_queues[i].owner == t) { | ||
3242 | owner = 1; | ||
3243 | break; | ||
3244 | } | ||
3245 | } | ||
3246 | |||
3247 | raw_spin_unlock_irqrestore(&sem->real_lock, flags); | ||
3248 | |||
3249 | if (owner) | ||
3250 | gsnedf_ikglp_unlock(l); | ||
3251 | |||
3252 | return 0; | ||
3253 | } | ||
3254 | |||
3255 | static void gsnedf_ikglp_free(struct litmus_lock* l) | ||
3256 | { | ||
3257 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
3258 | |||
3259 | kfree(sem->fifo_queues); | ||
3260 | kfree(sem); | ||
3261 | } | ||
3262 | |||
3263 | static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { | 924 | static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { |
3264 | .lock = gsnedf_ikglp_lock, | 925 | .lock = ikglp_lock, |
3265 | .unlock = gsnedf_ikglp_unlock, | 926 | .unlock = ikglp_unlock, |
3266 | 927 | .close = ikglp_close, | |
928 | .deallocate = ikglp_free, | ||
929 | |||
3267 | // ikglp can only be an outer-most lock. | 930 | // ikglp can only be an outer-most lock. |
3268 | .propagate_increase_inheritance = NULL, | 931 | .propagate_increase_inheritance = NULL, |
3269 | .propagate_decrease_inheritance = NULL, | 932 | .propagate_decrease_inheritance = NULL, |
3270 | |||
3271 | .close = gsnedf_ikglp_close, | ||
3272 | .deallocate = gsnedf_ikglp_free, | ||
3273 | }; | 933 | }; |
3274 | 934 | ||
3275 | static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) | 935 | static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) |
3276 | { | 936 | { |
3277 | struct ikglp_semaphore* sem; | 937 | return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg); |
3278 | int nr_replicas = 0; | ||
3279 | int i; | ||
3280 | |||
3281 | if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas))) | ||
3282 | { | ||
3283 | return(NULL); | ||
3284 | } | ||
3285 | if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas))) | ||
3286 | { | ||
3287 | return(NULL); | ||
3288 | } | ||
3289 | if(nr_replicas < 1) | ||
3290 | { | ||
3291 | return(NULL); | ||
3292 | } | ||
3293 | |||
3294 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
3295 | if(!sem) | ||
3296 | { | ||
3297 | return NULL; | ||
3298 | } | ||
3299 | |||
3300 | sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL); | ||
3301 | if(!sem->fifo_queues) | ||
3302 | { | ||
3303 | kfree(sem); | ||
3304 | return NULL; | ||
3305 | } | ||
3306 | |||
3307 | |||
3308 | sem->litmus_lock.ops = &gsnedf_ikglp_lock_ops; | ||
3309 | |||
3310 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
3311 | { | ||
3312 | __raw_spin_lock_init(&sem->lock, ((struct litmus_lock*)sem)->cheat_lockdep, &((struct litmus_lock*)sem)->key); | ||
3313 | } | ||
3314 | #else | ||
3315 | raw_spin_lock_init(&sem->lock); | ||
3316 | #endif | ||
3317 | |||
3318 | raw_spin_lock_init(&sem->real_lock); | ||
3319 | |||
3320 | sem->nr_replicas = nr_replicas; | ||
3321 | sem->m = num_online_cpus(); // default 'm' to number of CPUs. | ||
3322 | sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0); | ||
3323 | |||
3324 | TRACE("New IKGLP Sem: m = %d, k = %d, max fifo_len = %d\n", | ||
3325 | sem->m, | ||
3326 | sem->nr_replicas, | ||
3327 | sem->max_fifo_len); | ||
3328 | |||
3329 | for(i = 0; i < nr_replicas; ++i) | ||
3330 | { | ||
3331 | struct fifo_queue* q = &(sem->fifo_queues[i]); | ||
3332 | |||
3333 | q->owner = NULL; | ||
3334 | q->hp_waiter = NULL; | ||
3335 | init_waitqueue_head(&q->wait); | ||
3336 | q->count = 0; | ||
3337 | |||
3338 | q->global_heap_node.task = NULL; | ||
3339 | INIT_BINHEAP_NODE(&q->global_heap_node.node); | ||
3340 | |||
3341 | q->donee_heap_node.task = NULL; | ||
3342 | q->donee_heap_node.donor_info = NULL; | ||
3343 | q->donee_heap_node.fq = NULL; | ||
3344 | INIT_BINHEAP_NODE(&q->donee_heap_node.node); | ||
3345 | |||
3346 | q->nest.lock = (struct litmus_lock*)sem; | ||
3347 | q->nest.hp_waiter_eff_prio = NULL; | ||
3348 | q->nest.hp_waiter_ptr = &q->hp_waiter; | ||
3349 | INIT_BINHEAP_NODE(&q->nest.hp_binheap_node); | ||
3350 | } | ||
3351 | |||
3352 | sem->shortest_fifo_queue = &sem->fifo_queues[0]; | ||
3353 | |||
3354 | sem->top_m_size = 0; | ||
3355 | |||
3356 | // init heaps | ||
3357 | INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order); | ||
3358 | INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order); | ||
3359 | INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order); | ||
3360 | INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); | ||
3361 | INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); | ||
3362 | |||
3363 | return &sem->litmus_lock; | ||
3364 | } | 938 | } |
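The max_fifo_len computed in gsnedf_new_ikglp() is ceil(m/k): with at most m = num_online_cpus() incomplete requests and k replicas, each FIFO queue needs at most that many slots. The same integer idiom shown standalone, with a couple of worked values (illustrative only):

#include <stdio.h>

/* ceil(m / k) with the integer idiom used by gsnedf_new_ikglp(). */
static int max_fifo_len(int m, int k)
{
    return (m / k) + ((m % k) != 0);
}

int main(void)
{
    printf("%d\n", max_fifo_len(8, 3));   /* 3 = ceil(8/3) */
    printf("%d\n", max_fifo_len(8, 4));   /* 2 */
    printf("%d\n", max_fifo_len(4, 8));   /* 1: never fewer than one slot */
    return 0;
}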
3365 | 939 | ||
3366 | 940 | #endif /* CONFIG_LITMUS_NESTED_LOCKING */ | |
3367 | |||
3368 | |||
3369 | |||
3370 | #endif | ||
3371 | |||
3372 | |||
3373 | |||
3374 | |||
3375 | |||
3376 | |||
3377 | |||
3378 | |||
3379 | |||
3380 | 941 | ||
3381 | 942 | ||
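The queue and heap initialization removed above does not disappear; it is assumed to move behind ikglp_new(), presumably in litmus/ikglp_lock.c, leaving the plugin with only a thin constructor. A minimal sketch of the retained wrapper, matching the call shown in the new column of this hunk; the comment spells out the per-replica FIFO bound that the removed code computed:

    /*
     * Sketch: plugin-side IKGLP constructor after the refactor.  The heavy
     * lifting (per-replica FIFO queues, top-m/donee/donor heaps) is assumed
     * to be done inside ikglp_new().  As in the removed code, 'm' defaults
     * to the number of online CPUs and the per-replica FIFO bound is the
     * ceiling
     *     max_fifo_len = (m / nr_replicas) + ((m % nr_replicas) != 0),
     * e.g. m = 8 CPUs and nr_replicas = 3 gives max_fifo_len = 3.
     */
    static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
    {
        return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg);
    }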
3382 | /* ******************** FMLP support ********************** */ | 943 | /* ******************** FMLP support ********************** */ |
@@ -3561,7 +1122,7 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { | |||
3561 | .lock = gsnedf_fmlp_lock, | 1122 | .lock = gsnedf_fmlp_lock, |
3562 | .unlock = gsnedf_fmlp_unlock, | 1123 | .unlock = gsnedf_fmlp_unlock, |
3563 | .deallocate = gsnedf_fmlp_free, | 1124 | .deallocate = gsnedf_fmlp_free, |
3564 | 1125 | ||
3565 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1126 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
3566 | .propagate_increase_inheritance = NULL, | 1127 | .propagate_increase_inheritance = NULL, |
3567 | .propagate_decrease_inheritance = NULL | 1128 | .propagate_decrease_inheritance = NULL |
@@ -3599,17 +1160,17 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
3599 | /* Flexible Multiprocessor Locking Protocol */ | 1160 | /* Flexible Multiprocessor Locking Protocol */ |
3600 | *lock = gsnedf_new_fmlp(); | 1161 | *lock = gsnedf_new_fmlp(); |
3601 | break; | 1162 | break; |
3602 | 1163 | ||
3603 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1164 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
3604 | case RSM_MUTEX: | 1165 | case RSM_MUTEX: |
3605 | *lock = gsnedf_new_rsm_mutex(); | 1166 | *lock = gsnedf_new_rsm_mutex(); |
3606 | break; | 1167 | break; |
3607 | 1168 | ||
3608 | case IKGLP_SEM: | 1169 | case IKGLP_SEM: |
3609 | *lock = gsnedf_new_ikglp(args); | 1170 | *lock = gsnedf_new_ikglp(args); |
3610 | break; | 1171 | break; |
3611 | #endif | 1172 | #endif |
3612 | 1173 | ||
3613 | default: | 1174 | default: |
3614 | err = -ENXIO; | 1175 | err = -ENXIO; |
3615 | goto UNSUPPORTED_LOCK; | 1176 | goto UNSUPPORTED_LOCK; |
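For orientation, the lock-type dispatch this hunk touches has roughly the following shape. Only the switch arms and the default error path appear in the context lines above; the err handling after the switch and the body of the UNSUPPORTED_LOCK label are assumptions based on the usual LITMUS^RT allocate_lock pattern, not part of this diff.

    /* Sketch of gsnedf_allocate_lock(); the parts outside the switch are
     * assumed, not shown in the hunk above. */
    static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
                                     void* __user args)
    {
        int err;

        switch (type) {
        case FMLP_SEM:
            /* Flexible Multiprocessor Locking Protocol */
            *lock = gsnedf_new_fmlp();
            break;

    #ifdef CONFIG_LITMUS_NESTED_LOCKING
        case RSM_MUTEX:
            *lock = gsnedf_new_rsm_mutex();
            break;

        case IKGLP_SEM:
            *lock = gsnedf_new_ikglp(args);
            break;
    #endif

        default:
            err = -ENXIO;
            goto UNSUPPORTED_LOCK;
        }

        /* assumed: each constructor returns NULL on allocation failure */
        err = *lock ? 0 : -ENOMEM;

    UNSUPPORTED_LOCK:
        return err;
    }

User space reaches this switch through the fdso layer; the void __user argument forwarded for IKGLP_SEM is assumed to carry the replica count consumed by ikglp_new().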
@@ -3671,9 +1232,15 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
3671 | .activate_plugin = gsnedf_activate_plugin, | 1232 | .activate_plugin = gsnedf_activate_plugin, |
3672 | #ifdef CONFIG_LITMUS_LOCKING | 1233 | #ifdef CONFIG_LITMUS_LOCKING |
3673 | .allocate_lock = gsnedf_allocate_lock, | 1234 | .allocate_lock = gsnedf_allocate_lock, |
1235 | .increase_prio = increase_priority_inheritance, | ||
1236 | .decrease_prio = decrease_priority_inheritance, | ||
1237 | #endif | ||
1238 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1239 | .nested_increase_prio = nested_increase_priority_inheritance, | ||
1240 | .nested_decrease_prio = nested_decrease_priority_inheritance, | ||
3674 | #endif | 1241 | #endif |
3675 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 1242 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
3676 | .get_dgl_spinlock = gsn_edf_get_dgl_spinlock, | 1243 | .get_dgl_spinlock = gsnedf_get_dgl_spinlock, |
3677 | #endif | 1244 | #endif |
3678 | }; | 1245 | }; |
3679 | 1246 | ||
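The entries added to the plugin vtable here hand the priority-inheritance and DGL hooks to the generic locking code. A hypothetical call site is sketched below; the global litmus plugin pointer and the (task, inheritance source) argument order are assumptions about the surrounding tree, not something this hunk shows.

    /* Sketch: how generic locking code is assumed to invoke the new hooks. */
    #include <litmus/sched_plugin.h>

    static void boost_lock_owner(struct task_struct *owner,
                                 struct task_struct *hp_waiter)
    {
        /* non-nested case: raise the owner's effective priority to that of
         * the highest-priority waiter */
        litmus->increase_prio(owner, hp_waiter);
    }

    static void unboost_lock_owner(struct task_struct *owner)
    {
        /* drop back to the base priority once the resource is released;
         * a NULL inheritance source is assumed to mean "no inheritance" */
        litmus->decrease_prio(owner, NULL);
    }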
@@ -3692,11 +1259,11 @@ static int __init init_gsn_edf(void) | |||
3692 | 1259 | ||
3693 | INIT_BINHEAP_NODE(&entry->hn); | 1260 | INIT_BINHEAP_NODE(&entry->hn); |
3694 | } | 1261 | } |
3695 | 1262 | ||
3696 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 1263 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
3697 | raw_spin_lock_init(&dgl_lock); | 1264 | raw_spin_lock_init(&dgl_lock); |
3698 | #endif | 1265 | #endif |
3699 | 1266 | ||
3700 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 1267 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); |
3701 | return register_sched_plugin(&gsn_edf_plugin); | 1268 | return register_sched_plugin(&gsn_edf_plugin); |
3702 | } | 1269 | } |
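init_gsn_edf() as shown here finishes by initializing the DGL spinlock (when configured) and the EDF release domain before registering the plugin; the file is assumed to close with the usual module-init hook used by the other LITMUS^RT plugins:

    /* Sketch: standard registration tail assumed to follow init_gsn_edf(). */
    module_init(init_gsn_edf);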