author    Tanya Amert <tamert@cs.unc.edu>  2020-10-15 16:28:39 -0400
committer Tanya Amert <tamert@cs.unc.edu>  2020-10-15 16:28:39 -0400
commit    16ee246df107e60fa6c215634b5cf5f60ed71ab8 (patch)
tree      a3cab3cea94e8194ca75d5beef0713a4355325dc
parent    127b1b76510c5ccaeb985138f5c4aaaaf4ac1a89 (diff)
Added global OMLP to EXT-RES scheduler.
This includes priority inheritance, but no concept of
forbidden zones.
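
For context, the sketch below shows how a real-time task might exercise the
new lock type from userspace. It is a minimal sketch, assuming the liblitmus
API (open_omlp_sem, litmus_lock, litmus_unlock, od_close); the namespace file
and the resource id are illustrative, and the task is assumed to already be
admitted as a LITMUS^RT real-time task.

    /* Minimal usage sketch; liblitmus API assumed, names illustrative. */
    #include <fcntl.h>
    #include <litmus.h>

    int use_omlp_once(void)
    {
            int fd, od, ret;

            /* file backing the shared lock namespace (illustrative path) */
            fd = open("omlp.ns", O_RDONLY | O_CREAT, 0600);
            if (fd < 0)
                    return fd;

            /* Allocate an OMLP semaphore (resource id 0). Under EXT-RES this
             * reaches ext_res_allocate_lock(), which forwards the request to
             * the task's component via env->ops->allocate_lock(). */
            od = open_omlp_sem(fd, 0);
            if (od < 0)
                    return od;

            ret = litmus_lock(od);   /* may suspend; owner inherits priority */
            /* ... critical section ... */
            ret = litmus_unlock(od); /* ownership passes to next FIFO waiter */

            od_close(od);
            close(fd);
            return ret;
    }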
-rw-r--r--  include/litmus/reservations/ext_reservation.h   30
-rw-r--r--  include/litmus/rt_domain.h                        5
-rw-r--r--  litmus/reservations/ext_reservation.c            22
-rw-r--r--  litmus/reservations/gedf_reservation.c          457
-rw-r--r--  litmus/rt_domain.c                                2
-rw-r--r--  litmus/sched_ext_res.c                           24
6 files changed, 538 insertions, 2 deletions
diff --git a/include/litmus/reservations/ext_reservation.h b/include/litmus/reservations/ext_reservation.h
index c9ec924fc55b..432eba0785e2 100644
--- a/include/litmus/reservations/ext_reservation.h
+++ b/include/litmus/reservations/ext_reservation.h
@@ -6,6 +6,10 @@
 #include <litmus/debug_trace.h>
 #include <litmus/reservations/budget-notifier.h>
 
+#ifdef CONFIG_LITMUS_LOCKING
+#include <litmus/locking.h>
+#endif
+
 struct reservation_environment;
 struct reservation;
 
@@ -91,6 +95,17 @@ struct reservation {
 	struct bheap_node* heap_node;
 	struct release_heap* rel_heap;
 	struct list_head ln;
+
+#ifdef CONFIG_LITMUS_LOCKING
+
+	/* Reservation representing the current "inherited" reservation
+	 * priority, assigned by the scheduler plugins.
+	 * May point to self if PI does not result in
+	 * an increased priority.
+	 */
+	struct reservation* inh_res;
+
+#endif
 };
 
 void init_ext_reservation(
@@ -141,6 +156,16 @@ typedef int (*env_is_np_t) (
 typedef void (*env_shutdown_t) (
 	struct reservation_environment* env);
 
+#ifdef CONFIG_LITMUS_LOCKING
+/* Called when the current task attempts to create a new lock of a given
+ * protocol type. */
+typedef long (*env_allocate_lock_t) (
+	struct reservation_environment* env,
+	struct litmus_lock **lock,
+	int type,
+	void* __user config);
+#endif
+
 struct reservation_environment_ops {
 	env_update_time_t update_time;
 	env_dispatch_t dispatch;
@@ -151,6 +176,11 @@ struct reservation_environment_ops {
 	env_find_res_t find_res_by_id;
 	env_is_np_t is_np;
 	env_shutdown_t shutdown;
+
+#ifdef CONFIG_LITMUS_LOCKING
+	/* locking protocols */
+	env_allocate_lock_t allocate_lock;
+#endif
 };
 
 struct reservation_environment {
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 16f6b6cd6357..8dc8efb24d98 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -69,6 +69,11 @@ struct release_heap {
 void domain_suspend_releases(rt_domain_t* rt);
 void domain_resume_releases(rt_domain_t* rt);
 
+/* caller must hold release lock */
+struct release_heap* get_release_heap_res(rt_domain_t *rt,
+		struct reservation* res,
+		int use_task_heap);
+
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
 	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
diff --git a/litmus/reservations/ext_reservation.c b/litmus/reservations/ext_reservation.c
index 3bedb0fbee04..e6685fca20ea 100644
--- a/litmus/reservations/ext_reservation.c
+++ b/litmus/reservations/ext_reservation.c
@@ -22,6 +22,18 @@ int higher_res_prio(struct reservation* first,
 	if (!first || !second)
 		return first && !second;
 
+#ifdef CONFIG_LITMUS_LOCKING
+
+	/* Check for inherited priorities. Change the reservation
+	 * used for comparison in such a case.
+	 */
+	if (unlikely(first->inh_res))
+		first_task = first->inh_res;
+	if (unlikely(second->inh_res))
+		second_task = second->inh_res;
+
+#endif
+
 	if (first_task->priority > second_task->priority) {
 		return 1;
 	}
@@ -30,6 +42,16 @@ int higher_res_prio(struct reservation* first,
 		if (first_task->id < second_task->id) {
 			return 1;
 		}
+#ifdef CONFIG_LITMUS_LOCKING
+		else if (first_task->id == second_task->id) {
+			/* If the IDs are the same, then the reservation
+			 * with the inherited priority wins.
+			 */
+			if (!second->inh_res) {
+				return 1;
+			}
+		}
+#endif
 	}
 	return 0; /* fall-through. prio(second_task) > prio(first_task) */
 }
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
index 771971d0af1b..44bb13c551a0 100644
--- a/litmus/reservations/gedf_reservation.c
+++ b/litmus/reservations/gedf_reservation.c
@@ -11,6 +11,7 @@
 #include <litmus/np.h>
 #include <litmus/sched_trace.h>
 #include <litmus/debug_trace.h>
+#include <litmus/trace.h>
 #include <litmus/reservations/gedf_reservation.h>
 
 // Needed to store context during cross-CPU function calls
@@ -705,6 +706,457 @@ static void gedf_env_release_jobs(rt_domain_t* rt, struct bheap* res)
 	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
+#ifdef CONFIG_LITMUS_LOCKING
+
+#include <litmus/fdso.h>
+#include <litmus/wait.h>
+
+/* called with IRQs off */
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	struct gedf_cpu_entry* linked_on;
+	int check_preempt = 0;
+
+	struct reservation *t_res = (struct reservation *) tsk_rt(t)->plugin_state;
+	struct reservation *prio_inh_res = (struct reservation *) tsk_rt(prio_inh)->plugin_state;
+
+	struct reservation_environment *env = t_res->par_env;
+
+	struct gedf_reservation_environment *gedf_env;
+	gedf_env = container_of(env, struct gedf_reservation_environment, env);
+
+	struct gedf_reservation *gedf_res;
+	gedf_res = container_of(t_res, struct gedf_reservation, res);
+
+	raw_spin_lock(&gedf_env->domain.ready_lock);
+
+	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	t_res->inh_res = prio_inh_res;
+
+	linked_on = gedf_res->linked_on;
+
+	/* If it is scheduled, then we need to reorder the CPU heap. */
+	if (linked_on) {
+		TRACE_TASK(t, "%s: linked on %d\n",
+			__FUNCTION__, linked_on->id);
+		/* holder is scheduled; need to re-order CPUs. */
+		update_cpu_position(gedf_res->linked_on, &gedf_env->cpu_heap);
+	} else {
+		/* holder may be queued: first stop queue changes */
+		raw_spin_lock(&gedf_env->domain.release_lock);
+		if (is_queued_res(t_res)) {
+			TRACE_TASK(t, "%s: is queued\n",
+				__FUNCTION__);
+			/* We need to update the position of holder in some
+			 * heap. Note that this could be a release heap if
+			 * budget enforcement is used and this job overran. */
+			check_preempt =
+				!bheap_decrease(edf_ready_order,
+					t_res->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is either sleeping or currently being moved
+			 * by other code (e.g., a timer interrupt handler) that
+			 * will use the correct priority when enqueuing the
+			 * task. */
+			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
+				__FUNCTION__);
+		}
+		raw_spin_unlock(&gedf_env->domain.release_lock);
+
+		/* If holder was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			bheap_uncache_min(edf_ready_order,
+				&gedf_env->domain.ready_queue);
+			check_for_preemptions(gedf_env);
+		}
+	}
+
+	raw_spin_unlock(&gedf_env->domain.ready_lock);
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	struct reservation *t_res = (struct reservation *) tsk_rt(t)->plugin_state;
+
+	struct reservation_environment *env = t_res->par_env;
+
+	struct gedf_reservation_environment *gedf_env;
+	gedf_env = container_of(env, struct gedf_reservation_environment, env);
+
+	struct gedf_reservation *gedf_res;
+	gedf_res = container_of(t_res, struct gedf_reservation, res);
+
+	raw_spin_lock(&gedf_env->domain.ready_lock);
+
+	/* A job only stops inheriting a priority when it releases a
+	 * resource. Thus we can make the following assumption. */
+	int cpu = smp_processor_id();
+	struct gedf_cpu_entry *entry = &gedf_env->cpu_entries[cpu];
+	BUG_ON(entry->scheduled != gedf_res);
+
+	TRACE_TASK(t, "priority restored\n");
+	t_res->inh_res = NULL;
+
+	BUG_ON(!gedf_res->linked_on && !bheap_node_in_heap(t_res->heap_node));
+
+	/* Check if rescheduling is necessary. We can't use heap_decrease()
+	 * since the priority was effectively lowered. Instead, we
+	 * update the position of the CPU on which it is linked, or remove
+	 * and re-add it to the appropriate heap if it is not linked. */
+	if (gedf_res->linked_on) {
+		update_cpu_position(gedf_res->linked_on, &gedf_env->cpu_heap);
+	}
+	else {
+		struct bheap *heap;
+		if (t_res->replenishment_time > litmus_clock()) {
+			raw_spin_lock(&gedf_env->domain.release_lock);
+			heap = &(get_release_heap_res(&gedf_env->domain, t_res, 0)->heap);
+			raw_spin_unlock(&gedf_env->domain.release_lock);
+		}
+		else {
+			heap = &(gedf_env->domain.ready_queue);
+		}
+
+		bheap_delete(edf_ready_order, heap, t_res->heap_node);
+		bheap_insert(edf_ready_order, heap, t_res->heap_node);
+	}
+
+	check_for_preemptions(gedf_env);
+
+	raw_spin_unlock(&gedf_env->domain.ready_lock);
+}
+
+/* ******************** OMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct omlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+	struct reservation *hp_waiter_res;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t fifo_wait;
+	/* Priority queue of waiting tasks */
+	wait_queue_head_t prio_wait;
+
+	/* How many slots remaining in FIFO queue? */
+	unsigned int num_free;
+};
+
+static inline struct omlp_semaphore* omlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct omlp_semaphore, litmus_lock);
+}
+
+/* already locked */
+static void omlp_enqueue(struct omlp_semaphore *sem, prio_wait_queue_t* wait)
+{
+	if (sem->num_free) {
+		/* there is space in the FIFO queue */
+		sem->num_free--;
+		__add_wait_queue_tail_exclusive(&sem->fifo_wait, &wait->wq);
+	} else {
+		/* nope, gotta go to the priority queue */
+		__add_wait_queue_prio_exclusive(&sem->prio_wait, wait);
+	}
+}
+
+/* already locked */
+static int omlp_move(struct omlp_semaphore *sem)
+{
+	struct list_head* first;
+
+	if (waitqueue_active(&sem->prio_wait)) {
+		first = sem->prio_wait.task_list.next;
+		list_move_tail(first, &sem->fifo_wait.task_list);
+		return 1;
+	}
+	else
+		return 0;
+}
+
+static struct task_struct* omlp_dequeue(struct omlp_semaphore *sem)
+{
+	struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait);
+
+	if (first && !omlp_move(sem))
+		sem->num_free++;
+
+	return first;
+}
+
+/* caller is responsible for locking */
+static struct task_struct* omlp_find_hp_waiter(struct omlp_semaphore *sem,
+				struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+	struct reservation *q_res, *f_res = NULL;
+
+	/* check FIFO queue first */
+	list_for_each(pos, &sem->fifo_wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+				task_list)->private;
+
+		/* compare priorities to find the highest-priority task */
+		q_res = (struct reservation *) tsk_rt(queued)->plugin_state;
+		if (queued != skip && higher_res_prio(q_res, f_res)) {
+			f_res = q_res;
+			found = queued;
+		}
+	}
+
+	/* check priority queue next */
+	if (waitqueue_active(&sem->prio_wait)) {
+		/* first has highest priority */
+		pos = sem->prio_wait.task_list.next;
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+				task_list)->private;
+		q_res = (struct reservation *) tsk_rt(queued)->plugin_state;
+		if (higher_res_prio(q_res, f_res)) {
+			f_res = q_res;
+			found = queued;
+		}
+	}
+
+	return found;
+}
+
+static int gedf_env_omlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	prio_wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	/* prevent nested lock acquisition --- not supported by global OMLP
+	   by default */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
+	struct reservation *t_res = (struct reservation *) tsk_rt(t)->plugin_state;
+
+	struct gedf_reservation *gedf_res = container_of(t_res, struct gedf_reservation, res);
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		/* the priority queue needs the deadline, not the "priority" */
+		init_prio_waitqueue_entry(&wait, t, ULLONG_MAX - t_res->priority);
+
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		omlp_enqueue(sem, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (higher_res_prio(t_res, sem->hp_waiter_res)) {
+			sem->hp_waiter = t;
+			sem->hp_waiter_res = t_res;
+			struct reservation *o_res = (struct reservation *) tsk_rt(sem->owner)->plugin_state;
+			if (higher_res_prio(t_res, o_res)) {
+				set_priority_inheritance(sem->owner, sem->hp_waiter);
+			}
+		}
+
+		TS_LOCK_SUSPEND;
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+		BUG_ON(!gedf_res->linked_on && !bheap_node_in_heap(t_res->heap_node));
+
+		schedule(); /* will have issues if the reservation
+			     * is not linked or on the ready queue */
+
+		TS_LOCK_RESUME;
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+	}
+
+	tsk_rt(t)->num_locks_held++;
+
+	return 0;
+}
+
+static int gedf_env_omlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	tsk_rt(t)->num_locks_held--;
+
+	/* check if there are jobs waiting for this resource */
+	next = omlp_dequeue(sem);
+	if (next) {
+		/* next becomes the resource holder */
+		sem->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		struct reservation *n_res = (struct reservation *) tsk_rt(next)->plugin_state;
+
+		/* determine new hp_waiter if necessary */
+		if (next == sem->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			sem->hp_waiter = omlp_find_hp_waiter(sem, next);
+			if (sem->hp_waiter) {
+				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
+				sem->hp_waiter_res = (struct reservation *) tsk_rt(sem->hp_waiter)->plugin_state;
+			}
+			else {
+				TRACE("no further waiters\n");
+				sem->hp_waiter_res = NULL;
+			}
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			set_priority_inheritance(next, sem->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	} else
+		/* becomes available */
+		sem->owner = NULL;
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (((struct reservation *)tsk_rt(t)->plugin_state)->inh_res)
+		clear_priority_inheritance(t);
+
+out:
+	spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+	return err;
+}
+
+static int gedf_env_omlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+	if (owner)
+		gedf_env_omlp_unlock(l);
+
+	return 0;
+}
+
+static void gedf_env_omlp_free(struct litmus_lock* lock)
+{
+	kfree(omlp_from_lock(lock));
+}
+
+static struct litmus_lock_ops gedf_env_omlp_lock_ops = {
+	.close = gedf_env_omlp_close,
+	.lock = gedf_env_omlp_lock,
+	.unlock = gedf_env_omlp_unlock,
+	.deallocate = gedf_env_omlp_free,
+};
+
+static struct litmus_lock* gedf_env_new_omlp(void)
+{
+	struct omlp_semaphore* sem;
+	struct reservation *t_res;
+	struct gedf_reservation_environment *gedf_env;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	t_res = (struct reservation *) tsk_rt(current)->plugin_state;
+	gedf_env = container_of(t_res->par_env, struct gedf_reservation_environment, env);
+
+	sem->owner = NULL;
+	sem->hp_waiter = NULL;
+	sem->hp_waiter_res = NULL;
+	init_waitqueue_head(&sem->fifo_wait);
+	init_waitqueue_head(&sem->prio_wait);
+	sem->litmus_lock.ops = &gedf_env_omlp_lock_ops;
+	/* free = cpus-1 since ->owner is the head and also counted */
+	sem->num_free = gedf_env->num_cpus - 1;
+
+	return &sem->litmus_lock;
+}
+
+/* **** lock constructor **** */
+
+static long gedf_env_allocate_lock(
+	struct reservation_environment* env,
+	struct litmus_lock **lock,
+	int type,
+	void* __user unused)
+{
+	int err = -ENXIO;
+
+	/* EXT-RES currently only supports the OMLP within components
+	   for global resources. */
+	switch (type) {
+
+	case OMLP_SEM:
+		/* O(m) Multiprocessor Locking Protocol */
+		*lock = gedf_env_new_omlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
+	}
+
+	return err;
+}
+
+#endif
+
 static struct reservation_environment_ops gedf_env_ops = {
 	.update_time = gedf_env_update_time,
 	.dispatch = gedf_env_dispatch,
@@ -714,7 +1166,10 @@ static struct reservation_environment_ops gedf_env_ops = {
 	.remove_res = gedf_env_remove_res,
 	.find_res_by_id = gedf_find_res_by_id,
 	.is_np = gedf_env_is_np,
-	.shutdown = gedf_env_shutdown
+	.shutdown = gedf_env_shutdown,
+#ifdef CONFIG_LITMUS_LOCKING
+	.allocate_lock = gedf_env_allocate_lock,
+#endif
 };
 
 long alloc_gedf_reservation_environment(
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 80e01719884e..03ad18007069 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -202,7 +202,7 @@ static struct release_heap* __get_release_heap(rt_domain_t *rt,
 	return heap;
 }
 
-static struct release_heap* get_release_heap_res(rt_domain_t *rt,
+struct release_heap* get_release_heap_res(rt_domain_t *rt,
 		struct reservation* res,
 		int use_task_heap)
 {
diff --git a/litmus/sched_ext_res.c b/litmus/sched_ext_res.c
index 583a2ed9aef0..4bf81cd46c59 100644
--- a/litmus/sched_ext_res.c
+++ b/litmus/sched_ext_res.c
@@ -275,6 +275,27 @@ static bool ext_res_should_wait_for_stack(struct task_struct* t) {
 }
 */
 
+
+#ifdef CONFIG_LITMUS_LOCKING
+
+/* **** lock constructor **** */
+
+static long ext_res_allocate_lock(struct litmus_lock **lock, int type,
+		void* __user unused)
+{
+	struct reservation_environment *gedf_env;
+	int err = -ENXIO;
+
+	/* pass the allocate_lock call to the task's component */
+	gedf_env = ((struct reservation*) tsk_rt(current)->plugin_state)->par_env;
+	err = gedf_env->ops->allocate_lock(gedf_env, lock, type, unused);
+
+	return err;
+}
+
+#endif
+
+
 static struct domain_proc_info ext_res_domain_proc_info;
 
 static long ext_res_get_domain_proc_info(struct domain_proc_info **ret)
@@ -361,6 +382,9 @@ static struct sched_plugin ext_res_plugin = {
 	.reservation_create = ext_res_reservation_create,
 	//.current_budget = pres_current_budget,
 	//.should_wait_for_stack = ext_res_should_wait_for_stack,
+#ifdef CONFIG_LITMUS_LOCKING
+	.allocate_lock = ext_res_allocate_lock,
+#endif
 };
 
 static int __init init_ext_res(void)