author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-23 11:16:09 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-23 11:16:09 -0400
commit     fb0c271c1e8a4d4eac440d3e47d35f19235e07ac (patch)
tree       f78a5cd9201fc3f4dfd36bdad703f37b19979694
parent     8973214f010cf55fbf18cb88471d6c99ed6ff575 (diff)
gsn-edf: add prototype RSM mutex with nested priority-inheritance propagation (work in progress)
-rw-r--r--  include/litmus/fdso.h      |   6
-rw-r--r--  include/litmus/locking.h   |   9
-rw-r--r--  include/litmus/rt_param.h  |   3
-rw-r--r--  litmus/fdso.c              |   1
-rw-r--r--  litmus/locking.c           |  22
-rw-r--r--  litmus/sched_gsn_edf.c     | 384
6 files changed, 412 insertions(+), 13 deletions(-)
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index caf2a1e6918c..4b4ae7796788 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -19,8 +19,10 @@ typedef enum {
 
 	FMLP_SEM	= 0,
 	SRP_SEM		= 1,
+
+	RSM_MUTEX	= 2,
 
-	MAX_OBJ_TYPE	= 1
+	MAX_OBJ_TYPE	= 2
 } obj_type_t;
 
 struct inode_obj_id {
@@ -67,5 +69,7 @@ static inline void* od_lookup(int od, obj_type_t type)
 #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
 #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
 
+#define lookup_rsm_mutex(od) ((struct litmus_lock*) od_lookup(od, FMLP_SEM))
+
 
 #endif
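Note on the hunk above: the new lookup_rsm_mutex() macro resolves the descriptor with object type FMLP_SEM even though this commit introduces a dedicated RSM_MUTEX type; this looks like a copy-and-paste remnant of lookup_fmlp_sem(). A minimal sketch of the form the macro would presumably take once it targets the new type (an assumption, not part of the patch):

/* sketch only: assumes the macro is meant to resolve RSM_MUTEX descriptors */
#define lookup_rsm_mutex(od) ((struct litmus_lock*) od_lookup(od, RSM_MUTEX))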
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 27eafd002556..a01890153fa1 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -17,6 +17,10 @@ struct litmus_lock {
 #endif
 };
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+void nest_lock(struct litmus_lock *l, struct task_struct *t);
+#endif
+
 struct litmus_lock_ops {
 	/* Current task tries to obtain / drop a reference to a lock.
 	 * Optional methods, allowed by default. */
@@ -29,6 +33,11 @@ struct litmus_lock_ops {
 
 	/* The lock is no longer being referenced (mandatory method). */
 	void (*deallocate)(struct litmus_lock*);
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t);
+	void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t);
+#endif
 };
 
 #endif
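The two new ops are optional hooks that let a lock push a priority change onward to whatever lock its owner is, in turn, blocked on; locks that do not participate in nesting can simply leave them NULL (as the FMLP does later in this patch). A short sketch of how a caller is expected to invoke them, mirroring the nested_*_priority_inheritance() helpers added to sched_gsn_edf.c below; the wrapper name here is hypothetical:

/* Sketch: propagate an inheritance increase to the lock that t is blocked on.
 * The NULL check is what makes the new ops optional. */
static void propagate_increase_to_blocking_lock(struct task_struct *t)
{
	struct litmus_lock *blocked_on = tsk_rt(t)->blocked_lock;

	if (blocked_on && blocked_on->ops->propagate_increase_inheritance)
		blocked_on->ops->propagate_increase_inheritance(blocked_on, t);
}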
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 3a054db8ee07..5239d4a6f508 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -141,6 +141,9 @@ struct rt_param {
 
 	/* pointer to the last lock acquired */
 	struct litmus_lock* last_lock;
+
+	/* pointer to the lock upon which the task is currently blocked */
+	struct litmus_lock* blocked_lock;
 #endif
 
 
diff --git a/litmus/fdso.c b/litmus/fdso.c
index aa7b384264e3..f192787b577d 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -23,6 +23,7 @@ extern struct fdso_ops generic_lock_ops;
 static const struct fdso_ops* fdso_ops[] = {
 	&generic_lock_ops, /* FMLP_SEM */
 	&generic_lock_ops, /* SRP_SEM */
+	&generic_lock_ops, /* RSM_MUTEX */
 };
 
 static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
diff --git a/litmus/locking.c b/litmus/locking.c
index 5897beb941cf..f3fa273314fb 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -4,6 +4,7 @@
 
 #include <litmus/sched_plugin.h>
 #include <litmus/trace.h>
+#include <litmus/litmus.h>
 
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
 static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
@@ -124,6 +125,27 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 }
 
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+/* not "lock_nest" ... get it? */
+void nest_lock(struct litmus_lock *l, struct task_struct *t)
+{
+	if (tsk_rt(t)->last_lock) {
+		/* push the new lock onto the front of the task's chain of held locks */
+		struct litmus_lock *old = tsk_rt(t)->last_lock;
+
+		list_add(&l->lock_chain, &old->lock_chain);
+	}
+
+	tsk_rt(t)->last_lock = l;
+
+	// local inheritance now becomes transitive inheritance
+	tsk_rt(t)->trans_prio = tsk_rt(t)->local_prio; // TODO: what about the old transitive prio?
+	tsk_rt(t)->local_prio = NULL;
+}
+#endif
+
+
+
 #else
 
 struct fdso_ops generic_lock_ops = {};
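nest_lock() threads the newly acquired lock onto the front of the owner's chain of held locks and folds the owner's local inheritance into its transitive inheritance. It relies on several fields that this patch does not declare (lock_chain in struct litmus_lock, local_prio and trans_prio in struct rt_param), so they presumably come from an earlier patch in the series. A sketch of the assumed declarations, inferred purely from how nest_lock() uses them:

/* Assumed declarations (not part of this diff).
 * In struct litmus_lock (include/litmus/locking.h): */
	struct list_head lock_chain;	/* links the locks held by one task,
					 * most recently acquired at the head */
/* In struct rt_param (include/litmus/rt_param.h): */
	struct task_struct* local_prio;	/* priority inherited through last_lock */
	struct task_struct* trans_prio;	/* inheritance carried over from locks
					 * acquired earlier and still held */

With these definitions, list_add(&l->lock_chain, &old->lock_chain) prepends the new lock, so the chain stays ordered most-recently-acquired first.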
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 677a932e08be..91c38bbf695c 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -718,13 +718,13 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
 
 	/* A job only stops inheriting a priority when it releases a
 	 * resource. Thus we can make the following assumption.*/
-	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
+	//BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
 
 	if(prio_inh)
 		TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
 	else
 		TRACE_TASK(t, "base priority restored.\n");
 
 	tsk_rt(t)->eff_prio = prio_inh;
 
 	/* Check if rescheduling is necessary. We can't use heap_decrease()
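The assertion is disabled above because, with nested propagation, decrease_priority_inheritance() can now be reached for a task that is itself blocked on another lock and therefore not scheduled on any CPU, so scheduled_on == NO_CPU is no longer a bug by itself. A hedged sketch of a softer sanity check that could stand in for the BUG_ON (an assumption about intent, not part of the patch):

	/* A task reached by nested propagation may legitimately be unscheduled,
	 * but then it should at least be blocked on some lock. */
	if (tsk_rt(t)->scheduled_on == NO_CPU && !tsk_rt(t)->blocked_lock)
		TRACE_TASK(t, "priority decreased while neither scheduled nor blocked\n");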
@@ -736,6 +736,350 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
 }
 
 
+
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+
+
+
+/* called with IRQs off */
+static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	increase_priority_inheritance(t, prio_inh);	// increase our prio.
+
+	// beware: recursion
+	if(tsk_rt(t)->blocked_lock &&
+	   tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance) {
+		TRACE_TASK(mutex->hp_waiter, "Inheritor is blocked. Checking lock %p.", l);
+		tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance(tsk_rt(t)->blocked_lock, t);
+	}
+}
+
+/* called with IRQs off */
+static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	decrease_priority_inheritance(t, prio_inh);
+
+	// beware: recursion
+	if(tsk_rt(t)->blocked_lock && tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance) {
+		TRACE_TASK(mutex->hp_waiter, "Inheritor is blocked. Checking lock %p.", l);
+		tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance(tsk_rt(t)->blocked_lock, t);
+	}
+}
+
+
+void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
+						      struct task_struct* t)
+{
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if(tsk_rt(t)->blocked_lock == l) {	// prevent race on tsk_rt(t)->blocked
+		if(t != mutex->hp_waiter) {
+			if(edf_higher_prio(t, mutex->hp_waiter)) {
+				mutex->hp_waiter = t;
+
+				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter by propagation.\n");
+			}
+			else {
+				goto EXIT;	// HP waiter has greater prio than us. bail out.
+			}
+		}
+
+		if(edf_higher_prio(mutex->hp_waiter, mutex->owner)) {
+			struct task_struct* prio = (tsk_rt(t)->eff_prio) ? tsk_rt(t)->eff_prio : t;
+
+			TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l);
+
+			nested_increase_priority_inheritance(mutex->owner, prio);
+		}
+	}
+
+EXIT:
+
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+}
+
+void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
+						      struct task_struct* t)
+{
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if(tsk_rt(t)->blocked_lock == l) {	// prevent race on tsk_rt(t)->blocked
+		if(t == mutex->hp_waiter) {
+			struct task_struct* prio;
+
+			TRACE_TASK(t, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL);
+
+			TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+
+			TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l);
+
+
+			// lower eff_prio of owner to new hp if needed.
+			if(t == mutex->owner->eff_prio)
+			{
+
+			}
+
+
+			nested_increase_priority_inheritance(mutex->owner, prio);
+		}
+	}
+
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+}
+
+
+
+
+
+
+/* ******************** RSM MUTEX ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct rsm_mutex {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks -- for now. time stamp in the future. */
+	wait_queue_head_t wait;
+};
+
+static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct rsm_mutex, litmus_lock);
+}
+
+/* caller is responsible for locking */
+struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
+					     struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &mutex->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
+
+
+
+
+int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if (mutex->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_waitqueue_entry(&wait, t);
+
+		// TODO: inheritance propagation from another thread may not finish
+		// before I check local inheritance...
+		tsk_rt(t)->blocked_lock = l;	/* record where we are blocked */
+		mb();
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&mutex->wait, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, mutex->hp_waiter)) {
+			mutex->hp_waiter = t;
+			if (edf_higher_prio(t, mutex->owner)) {
+				struct task_struct* prio = (tsk_rt(t)->eff_prio) ? tsk_rt(t)->eff_prio : t;
+				nested_increase_priority_inheritance(mutex->owner, prio);
+			}
+		}
+
+		TS_LOCK_SUSPEND;
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&mutex->wait.lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release.
+		 */
+
+		schedule();
+
+		TS_LOCK_RESUME;
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(mutex->owner != t);
+	} else {
+		/* it's ours now */
+		mutex->owner = t;
+
+		nest_lock(l, t);
+
+		spin_unlock_irqrestore(&mutex->wait.lock, flags);
+	}
+
+	return 0;
+}
+
+int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	if (mutex->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->local_prio) {
+		nested_decrease_priority_inheritance(t, NULL);
+	}
+
+
+	/* check if there are jobs waiting for this resource */
+	next = __waitqueue_remove_first(&mutex->wait);
+	if (next) {
+		/* next becomes the resource holder */
+		mutex->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		tsk_rt(next)->blocked_lock = NULL;
+		nest_lock(l, next);	// moves local_prio to trans_prio
+
+		/* determine new hp_waiter if necessary */
+		if (next == mutex->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next);
+			if (mutex->hp_waiter)
+				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			increase_priority_inheritance(next, mutex->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	}
+	else {
+		/* becomes available */
+		mutex->owner = NULL;
+	}
+
+out:
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+
+	return err;
+}
+
+int gsnedf_rsm_mutex_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&mutex->wait.lock, flags);
+
+	owner = (mutex->owner == t);
+
+	spin_unlock_irqrestore(&mutex->wait.lock, flags);
+
+	if (owner)
+		gsnedf_rsm_mutex_unlock(l);
+
+	return 0;
+}
+
+void gsnedf_rsm_mutex_free(struct litmus_lock* lock)
+{
+	kfree(rsm_mutex_from_lock(lock));
+}
+
+static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = {
+	.close = gsnedf_rsm_mutex_close,
+	.lock = gsnedf_rsm_mutex_lock,
+	.unlock = gsnedf_rsm_mutex_unlock,
+	.deallocate = gsnedf_rsm_mutex_free,
+	.propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance,
+	.propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance
+};
+
+static struct litmus_lock* gsnedf_new_rsm_mutex(void)
+{
+	struct rsm_mutex* mutex;
+
+	mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
+	if (!mutex)
+		return NULL;
+
+	mutex->owner = NULL;
+	mutex->hp_waiter = NULL;
+	init_waitqueue_head(&mutex->wait);
+	mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops;
+
+	return &mutex->litmus_lock;
+}
+
+/* **** lock constructor **** */
+
+#endif
+
+
+
+
+
+
+
+
+
+
+
 /* ******************** FMLP support ********************** */
 
 /* struct for semaphore with priority inheritance */
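The decrease-propagation path above is visibly unfinished: prio is declared but never assigned before being passed to nested_increase_priority_inheritance(), the conditional meant to lower the owner's inheritance has an empty body, the owner's effective priority is read as mutex->owner->eff_prio rather than through tsk_rt() as elsewhere, and the TRACE_TASK calls inside the two nested_*_priority_inheritance() helpers reference mutex and l, which are not in scope there. The following is one plausible completion of the decrease path, under the assumption that the owner should fall back to inheriting from the new highest-priority waiter (or to its base priority if none remains) once t stops pushing its priority through this lock; it is a sketch, not the author's final code:

void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
						      struct task_struct* t)
{
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	unsigned long flags;

	spin_lock_irqsave(&mutex->wait.lock, flags);

	if (tsk_rt(t)->blocked_lock == l && t == mutex->hp_waiter) {
		struct task_struct *new_prio;

		TRACE_TASK(t, "was highest-prio waiter\n");

		/* t's priority already reflects the decrease, so rescan the
		 * whole queue (including t) for the new highest-prio waiter. */
		mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL);

		/* what the owner should inherit now: the new hp_waiter's
		 * effective priority, or nothing if the queue is empty */
		if (mutex->hp_waiter)
			new_prio = tsk_rt(mutex->hp_waiter)->eff_prio ?
				tsk_rt(mutex->hp_waiter)->eff_prio : mutex->hp_waiter;
		else
			new_prio = NULL;

		/* only adjust the owner if it was inheriting from t */
		if (tsk_rt(mutex->owner)->eff_prio == t)
			nested_decrease_priority_inheritance(mutex->owner, new_prio);
	}

	spin_unlock_irqrestore(&mutex->wait.lock, flags);
}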
@@ -918,6 +1262,11 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
 	.lock = gsnedf_fmlp_lock,
 	.unlock = gsnedf_fmlp_unlock,
 	.deallocate = gsnedf_fmlp_free,
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	.propagate_increase_inheritance = NULL,
+	.propagate_decrease_inheritance = NULL
+#endif
 };
 
 static struct litmus_lock* gsnedf_new_fmlp(void)
@@ -942,7 +1291,7 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 				 void* __user unused)
 {
-	int err = -ENXIO;
+	int err;
 
 	/* GSN-EDF currently only supports the FMLP for global resources. */
 	switch (type) {
@@ -950,14 +1299,25 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 	case FMLP_SEM:
 		/* Flexible Multiprocessor Locking Protocol */
 		*lock = gsnedf_new_fmlp();
-		if (*lock)
-			err = 0;
-		else
-			err = -ENOMEM;
 		break;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	case RSM_MUTEX:
+		*lock = gsnedf_new_rsm_mutex();
+		break;
+#endif
+
+	default:
+		err = -ENXIO;
+		goto UNSUPPORTED_LOCK;
 	};
 
+	if (*lock)
+		err = 0;
+	else
+		err = -ENOMEM;
+
+UNSUPPORTED_LOCK:
 	return err;
 }
 
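The restructuring in the last hunk makes the post-switch check of *lock shared by every supported lock type, while unsupported types set -ENXIO and jump over it via UNSUPPORTED_LOCK. (The untouched comment above the switch, which says GSN-EDF only supports the FMLP, is now slightly stale.) For comparison, an equivalent formulation without the label, shown only as a sketch of the same control flow:

static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
				 void* __user unused)
{
	switch (type) {
	case FMLP_SEM:
		/* Flexible Multiprocessor Locking Protocol */
		*lock = gsnedf_new_fmlp();
		break;

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	case RSM_MUTEX:
		*lock = gsnedf_new_rsm_mutex();
		break;
#endif

	default:
		return -ENXIO;	/* unsupported lock type */
	}

	return *lock ? 0 : -ENOMEM;	/* allocation may have failed */
}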