Diffstat (limited to 'litmus/sched_gsn_edf.c')

 litmus/sched_gsn_edf.c | 384 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 372 insertions(+), 12 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 677a932e08be..91c38bbf695c 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -718,13 +718,13 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
 
 	/* A job only stops inheriting a priority when it releases a
 	 * resource. Thus we can make the following assumption.*/
-	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
+	//BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
 
 	if(prio_inh)
 		TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
 	else
 		TRACE_TASK(t, "base priority restored.\n");
 
 	tsk_rt(t)->eff_prio = prio_inh;
 
 	/* Check if rescheduling is necessary. We can't use heap_decrease()
@@ -736,6 +736,350 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
 }
 
 
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+
+/* called with IRQs off */
+static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	increase_priority_inheritance(t, prio_inh);  // increase our prio.
+
+	// beware: recursion
+	if(tsk_rt(t)->blocked_lock &&
+	   tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance) {
+		TRACE_TASK(t, "Inheritor is blocked. Checking lock %p.\n", tsk_rt(t)->blocked_lock);
+		tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance(tsk_rt(t)->blocked_lock, t);
+	}
+}
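+
+/* A sketch of the intended propagation, with purely illustrative task
+ * and lock names: suppose priority A < B < C, A holds L1, B holds L2
+ * while blocked on L1, and C now blocks on L2. C's arrival raises B via
+ * increase_priority_inheritance(B, C); because B is itself blocked,
+ * propagate_increase_inheritance(L1, B) then re-evaluates L1's
+ * hp_waiter and raises A in turn -- one recursion step per lock in the
+ * blocking chain. */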
+
+/* called with IRQs off */
+static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	decrease_priority_inheritance(t, prio_inh);
+
+	// beware: recursion
+	if(tsk_rt(t)->blocked_lock &&
+	   tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance) {
+		TRACE_TASK(t, "Inheritor is blocked. Checking lock %p.\n", tsk_rt(t)->blocked_lock);
+		tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance(tsk_rt(t)->blocked_lock, t);
+	}
+}
+
+
+/* ******************** RSM MUTEX ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct rsm_mutex {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks -- for now. time stamp in the future. */
+	wait_queue_head_t wait;
+};
+
+static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct rsm_mutex, litmus_lock);
+}
+
+/* caller is responsible for locking */
+struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
+					     struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &mutex->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
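+
+/* Note: initializing found = NULL relies on edf_higher_prio() treating
+ * a NULL second argument as lower-priority than any real-time task
+ * (assumed here, not shown in this patch). The scan is O(n) in the
+ * queue length and runs under mutex->wait.lock. */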
+
+void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
+						     struct task_struct* t)
+{
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if(tsk_rt(t)->blocked_lock == l) {  // prevent race on tsk_rt(t)->blocked_lock
+		if(t != mutex->hp_waiter) {
+			if(edf_higher_prio(t, mutex->hp_waiter)) {
+				mutex->hp_waiter = t;
+				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter by propagation.\n");
+			}
+			else {
+				goto out;  // hp_waiter has greater prio than us; bail out.
+			}
+		}
+
+		if(edf_higher_prio(mutex->hp_waiter, mutex->owner)) {
+			struct task_struct* prio = (tsk_rt(t)->eff_prio) ?
+				tsk_rt(t)->eff_prio : t;
+
+			TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l);
+
+			nested_increase_priority_inheritance(mutex->owner, prio);
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+}
+
+void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
+						     struct task_struct* t)
+{
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if(tsk_rt(t)->blocked_lock == l) {  // prevent race on tsk_rt(t)->blocked_lock
+		if(t == mutex->hp_waiter) {
+			struct task_struct* prio;
+
+			TRACE_TASK(t, "was highest-prio waiter\n");
+			/* t no longer carries the highest priority, so the
+			 * next-highest priority in the queue must be
+			 * reflected in hp_waiter. */
+			mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL);
+
+			if(mutex->hp_waiter)
+				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+
+			/* Lower the owner's inherited priority to the new
+			 * hp_waiter (or restore its base priority if no
+			 * waiters remain), but only if the owner was
+			 * inheriting from t through this lock. */
+			if(t == tsk_rt(mutex->owner)->eff_prio) {
+				prio = (mutex->hp_waiter) ?
+					((tsk_rt(mutex->hp_waiter)->eff_prio) ?
+						tsk_rt(mutex->hp_waiter)->eff_prio :
+						mutex->hp_waiter) :
+					NULL;
+
+				TRACE_TASK(t, "Propagating inheritance to holder of %p.\n", l);
+
+				nested_decrease_priority_inheritance(mutex->owner, prio);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+}
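+
+/* Caveat: restoring the owner to the new hp_waiter's priority is only
+ * safe if this lock was the owner's sole source of inherited priority.
+ * If the owner holds several contended locks, its effective priority
+ * would have to be recomputed across all of them; the code above does
+ * not attempt that. */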
+
+int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if (mutex->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_waitqueue_entry(&wait, t);
+
+		// TODO: inheritance propagation from another thread may not finish
+		// before I check local inheritance...
+		tsk_rt(t)->blocked_lock = l;  /* record where we are blocked */
+		mb();
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&mutex->wait, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, mutex->hp_waiter)) {
+			mutex->hp_waiter = t;
+			if (edf_higher_prio(t, mutex->owner)) {
+				struct task_struct* prio = (tsk_rt(t)->eff_prio) ?
+					tsk_rt(t)->eff_prio : t;
+				nested_increase_priority_inheritance(mutex->owner, prio);
+			}
+		}
+
+		TS_LOCK_SUSPEND;
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&mutex->wait.lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake-up per release.
+		 */
+
+		schedule();
+
+		TS_LOCK_RESUME;
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(mutex->owner != t);
+	} else {
+		/* it's ours now */
+		mutex->owner = t;
+
+		nest_lock(l, t);
+
+		spin_unlock_irqrestore(&mutex->wait.lock, flags);
+	}
+
+	return 0;
+}
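+
+/* A contended acquisition, relying on the FIFO hand-off above: T1 locks
+ * and becomes owner; T2 calls lock(), queues itself, possibly boosts T1,
+ * and suspends in schedule(). When T1 unlocks, ownership passes directly
+ * to T2 before it is woken, so T2 resumes already holding the lock --
+ * hence BUG_ON(mutex->owner != t) instead of a retry loop. */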
+
+int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if (mutex->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->local_prio) {
+		nested_decrease_priority_inheritance(t, NULL);
+	}
+
+	/* check if there are jobs waiting for this resource */
+	next = __waitqueue_remove_first(&mutex->wait);
+	if (next) {
+		/* next becomes the resource holder */
+		mutex->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		tsk_rt(next)->blocked_lock = NULL;
+		nest_lock(l, next);  // moves local_prio to trans_prio
+
+		/* determine new hp_waiter if necessary */
+		if (next == mutex->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next);
+			if (mutex->hp_waiter)
+				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			increase_priority_inheritance(next, mutex->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	}
+	else {
+		/* becomes available */
+		mutex->owner = NULL;
+	}
+
+out:
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+
+	return err;
+}
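+
+/* Rationale for the else-branch above: FIFO hand-off may give the lock
+ * to a lower-priority waiter while a higher-priority task is still
+ * queued; letting next inherit hp_waiter's priority bounds the extra
+ * blocking that the high-priority waiter incurs behind the FIFO queue. */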
+
+int gsnedf_rsm_mutex_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	owner = (mutex->owner == t);
+
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+
+	if (owner)
+		gsnedf_rsm_mutex_unlock(l);
+
+	return 0;
+}
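+
+/* close() is presumably invoked when a task drops its object descriptor
+ * for the lock (e.g., upon exit); force-unlocking here keeps a departing
+ * owner from leaving the mutex locked forever. */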
+
+void gsnedf_rsm_mutex_free(struct litmus_lock* lock)
+{
+	kfree(rsm_mutex_from_lock(lock));
+}
+
+static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = {
+	.close  = gsnedf_rsm_mutex_close,
+	.lock   = gsnedf_rsm_mutex_lock,
+	.unlock = gsnedf_rsm_mutex_unlock,
+	.deallocate = gsnedf_rsm_mutex_free,
+	.propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance,
+	.propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance
+};
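+
+/* These ops are not called directly: lock/unlock/close are reached via
+ * the generic LITMUS lock syscalls on an object descriptor (as with the
+ * FMLP ops below), while the propagate_* callbacks are invoked from
+ * nested_*_priority_inheritance() whenever the inheriting task is itself
+ * blocked on another lock. */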
+
+/* **** lock constructor **** */
+
+static struct litmus_lock* gsnedf_new_rsm_mutex(void)
+{
+	struct rsm_mutex* mutex;
+
+	mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
+	if (!mutex)
+		return NULL;
+
+	mutex->owner = NULL;
+	mutex->hp_waiter = NULL;
+	init_waitqueue_head(&mutex->wait);
+	mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops;
+
+	return &mutex->litmus_lock;
+}
+
+#endif
+
+
 /* ******************** FMLP support ********************** */
 
 /* struct for semaphore with priority inheritance */
@@ -918,6 +1262,11 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
 	.lock   = gsnedf_fmlp_lock,
 	.unlock = gsnedf_fmlp_unlock,
 	.deallocate = gsnedf_fmlp_free,
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	/* The FMLP does not participate in nested inheritance
+	 * propagation, so these callbacks are explicitly left NULL. */
+	.propagate_increase_inheritance = NULL,
+	.propagate_decrease_inheritance = NULL
+#endif
 };
 
 static struct litmus_lock* gsnedf_new_fmlp(void)
@@ -942,7 +1291,7 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 				 void* __user unused)
 {
-	int err = -ENXIO;
+	int err;
 
-	/* GSN-EDF currently only supports the FMLP for global resources. */
+	/* GSN-EDF supports the FMLP and the RSM mutex for global resources. */
 	switch (type) {
@@ -950,14 +1299,25 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 	case FMLP_SEM:
 		/* Flexible Multiprocessor Locking Protocol */
 		*lock = gsnedf_new_fmlp();
-		if (*lock)
-			err = 0;
-		else
-			err = -ENOMEM;
 		break;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	case RSM_MUTEX:
+		*lock = gsnedf_new_rsm_mutex();
+		break;
+#endif
+
+	default:
+		err = -ENXIO;
+		goto UNSUPPORTED_LOCK;
 	};
 
+	if (*lock)
+		err = 0;
+	else
+		err = -ENOMEM;
+
+UNSUPPORTED_LOCK:
 	return err;
 }
 