author		Tanya Amert <tamert@cs.unc.edu>	2020-10-15 16:02:37 -0400
committer	Tanya Amert <tamert@cs.unc.edu>	2020-10-15 16:02:37 -0400
commit		127b1b76510c5ccaeb985138f5c4aaaaf4ac1a89 (patch)
tree		cd5b94ded393cb67ad8844bdba1bb35be6e17b2f
parent		6e3c78374e56bbdfb5e8d91c956701d061ecc9a7 (diff)
Added global OMLP to GSN-EDF.
This is the variant of the OMLP that uses priority inheritance, and comes from BBB's dissertation work.
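
For context, a rough userspace sketch of how a GSN-EDF task could exercise the new lock type through liblitmus follows. It is illustrative only: the namespace file name, the resource id 0, the RT parameters, and the assumption that the userspace headers already define the new OMLP_SEM value are hypothetical and not part of this patch.

	#include <fcntl.h>
	#include <sys/stat.h>
	#include <litmus.h>

	int main(void)
	{
		struct rt_task param;
		int fd, od;

		init_litmus();

		/* minimal RT parameters: 10 ms budget every 100 ms (made-up numbers) */
		init_rt_task_param(&param);
		param.exec_cost = ms2ns(10);
		param.period    = ms2ns(100);
		set_rt_task_param(gettid(), &param);
		task_mode(LITMUS_RT_TASK);

		/* shared namespace file; OMLP_SEM object with id 0 (illustrative) */
		fd = open("omlp_ns", O_RDONLY | O_CREAT, S_IRUSR);
		od = od_open(fd, OMLP_SEM, 0);

		litmus_lock(od);
		/* ... critical section on the shared resource ... */
		litmus_unlock(od);

		od_close(od);
		task_mode(BACKGROUND_TASK);
		return 0;
	}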
-rw-r--r--	include/litmus/fdso.h		4
-rw-r--r--	litmus/fdso.c			1
-rw-r--r--	litmus/sched_gsn_edf.c		282
3 files changed, 285 insertions, 2 deletions
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index fd9b30dbfb34..49513497766f 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -27,7 +27,9 @@ typedef enum {
 
 	DFLP_SEM	= 6,
 
-	MAX_OBJ_TYPE	= 6
+	OMLP_SEM	= 7,
+
+	MAX_OBJ_TYPE	= 7
 } obj_type_t;
 
 struct inode_obj_id {
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 0ff54e41839c..e96e575d912e 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -28,6 +28,7 @@ static const struct fdso_ops* fdso_ops[] = {
 	&generic_lock_ops, /* DPCP_SEM */
 	&generic_lock_ops, /* PCP_SEM */
 	&generic_lock_ops, /* DFLP_SEM */
+	&generic_lock_ops, /* OMLP_SEM */
 };
 
 static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 8f28dc4e5192..d5f8dd89f79a 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -627,6 +627,7 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 #ifdef CONFIG_LITMUS_LOCKING
 
 #include <litmus/fdso.h>
+#include <litmus/wait.h>
 
 /* called with IRQs off */
 static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
@@ -924,6 +925,275 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 	return &sem->litmus_lock;
 }
 
+
+
+/* ******************** OMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct omlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t fifo_wait;
+	/* Priority queue of waiting tasks */
+	wait_queue_head_t prio_wait;
+
+	/* How many slots remain in the FIFO queue? */
+	unsigned int num_free;
+};
+
+static inline struct omlp_semaphore* omlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct omlp_semaphore, litmus_lock);
+}
+
+/* already locked */
+static void omlp_enqueue(struct omlp_semaphore *sem, prio_wait_queue_t* wait)
+{
+	if (sem->num_free) {
+		/* there is space in the FIFO queue */
+		sem->num_free--;
+		__add_wait_queue_tail_exclusive(&sem->fifo_wait, &wait->wq);
+	} else {
+		/* nope, gotta go to the priority queue */
+		__add_wait_queue_prio_exclusive(&sem->prio_wait, wait);
+	}
+}
+
+/* already locked */
+static int omlp_move(struct omlp_semaphore *sem)
+{
+	struct list_head* first;
+
+	if (waitqueue_active(&sem->prio_wait)) {
+		first = sem->prio_wait.task_list.next;
+		list_move_tail(first, &sem->fifo_wait.task_list);
+		return 1;
+	}
+	else
+		return 0;
+}
+
+static struct task_struct* omlp_dequeue(struct omlp_semaphore *sem)
+{
+	struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait);
+
+	if (first && !omlp_move(sem))
+		sem->num_free++;
+
+	return first;
+}
+
+/* caller is responsible for locking */
+static struct task_struct* omlp_find_hp_waiter(struct omlp_semaphore *sem,
+					       struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	/* check FIFO queue first */
+	list_for_each(pos, &sem->fifo_wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+
+	/* check priority queue next */
+	if (waitqueue_active(&sem->prio_wait)) {
+		/* first has highest priority */
+		pos = sem->prio_wait.task_list.next;
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+		if (edf_higher_prio(queued, found))
+			found = queued;
+	}
+
+	return found;
+}
+
+static int gsnedf_omlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	prio_wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	/* prevent nested lock acquisition --- not supported by global OMLP
+	   by default */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_prio_waitqueue_entry(&wait, t, get_deadline(t));
+
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		omlp_enqueue(sem, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, sem->hp_waiter)) {
+			sem->hp_waiter = t;
+			if (edf_higher_prio(t, sem->owner))
+				set_priority_inheritance(sem->owner, sem->hp_waiter);
+		}
+
+		TS_LOCK_SUSPEND;
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+		schedule();
+
+		TS_LOCK_RESUME;
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+	}
+
+	tsk_rt(t)->num_locks_held++;
+
+	return 0;
+}
+
+static int gsnedf_omlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	tsk_rt(t)->num_locks_held--;
+
+	/* check if there are jobs waiting for this resource */
+	next = omlp_dequeue(sem);
+	if (next) {
+		/* next becomes the resource holder */
+		sem->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		/* determine new hp_waiter if necessary */
+		if (next == sem->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			sem->hp_waiter = omlp_find_hp_waiter(sem, next);
+			if (sem->hp_waiter)
+				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			set_priority_inheritance(next, sem->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	} else
+		/* becomes available */
+		sem->owner = NULL;
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->inh_task)
+		clear_priority_inheritance(t);
+
+out:
+	spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+	return err;
+}
+
+static int gsnedf_omlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+	if (owner)
+		gsnedf_omlp_unlock(l);
+
+	return 0;
+}
+
+static void gsnedf_omlp_free(struct litmus_lock* lock)
+{
+	kfree(omlp_from_lock(lock));
+}
+
+static struct litmus_lock_ops gsnedf_omlp_lock_ops = {
+	.close      = gsnedf_omlp_close,
+	.lock       = gsnedf_omlp_lock,
+	.unlock     = gsnedf_omlp_unlock,
+	.deallocate = gsnedf_omlp_free,
+};
+
+static struct litmus_lock* gsnedf_new_omlp(void)
+{
+	struct omlp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner     = NULL;
+	sem->hp_waiter = NULL;
+	init_waitqueue_head(&sem->fifo_wait);
+	init_waitqueue_head(&sem->prio_wait);
+	sem->litmus_lock.ops = &gsnedf_omlp_lock_ops;
+	/* free = cpus - 1 since ->owner is the head and also counted */
+	sem->num_free = num_online_cpus() - 1;
+
+#ifdef CONFIG_RELEASE_MASTER
+	/* If we use dedicated interrupt handling, then there are actually
+	 * only m - 1 CPUs around. */
+	if (gsnedf.release_master != NO_CPU)
+		sem->num_free -= 1;
+#endif
+
+	return &sem->litmus_lock;
+}
+
 /* **** lock constructor **** */
 
 
@@ -932,7 +1202,8 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 {
 	int err = -ENXIO;
 
-	/* GSN-EDF currently only supports the FMLP for global resources. */
+	/* GSN-EDF currently only supports the FMLP and OMLP
+	   for global resources. */
 	switch (type) {
 
 	case FMLP_SEM:
@@ -944,6 +1215,15 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 			err = -ENOMEM;
 		break;
 
+	case OMLP_SEM:
+		/* O(m) Multiprocessor Locking Protocol */
+		*lock = gsnedf_new_omlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
 	};
 
 	return err;