path: root/litmus/sched_gsn_edf.c
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c  |  460
1 file changed, 378 insertions(+), 82 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 3d653bdca357..c0316c4a1b35 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -120,6 +120,9 @@ static struct binheap_handle gsnedf_cpu_heap;
120static rt_domain_t gsnedf; 120static rt_domain_t gsnedf;
121#define gsnedf_lock (gsnedf.ready_lock) 121#define gsnedf_lock (gsnedf.ready_lock)
122 122
123#ifdef CONFIG_LITMUS_DGL_SUPPORT
124static raw_spinlock_t dgl_lock;
125#endif
123 126
124/* Uncomment this if you want to see all scheduling decisions in the 127/* Uncomment this if you want to see all scheduling decisions in the
125 * TRACE() log. 128 * TRACE() log.
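The hunk above adds a single global spinlock, dgl_lock, that serializes dynamic-group-lock (DGL) operations. The rest of the patch then replaces direct raw_spin_lock_irqsave() calls on per-mutex locks with lock_global_*/lock_fine_* helpers. The helper definitions are not part of this file; the following is a minimal sketch of one plausible definition, assuming the coarse-grained DGL configuration in which dgl_lock subsumes the per-mutex fine-grained locks (consistent with the "these are unused under DGL coarse-grain locking" comment later in the patch):

/* Sketch only -- assumed semantics of the locking helpers used throughout
 * this patch.  With CONFIG_LITMUS_DGL_SUPPORT (coarse-grained), the global
 * dgl_lock protects all per-mutex state, so the fine-grained helpers can
 * degenerate to no-ops; without it, the per-mutex raw spinlock does the
 * work and the global helpers swallow their arguments textually, so
 * &dgl_lock need not even exist in non-DGL builds. */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
#define lock_global_irqsave(lock, flags)       raw_spin_lock_irqsave((lock), (flags))
#define unlock_global_irqrestore(lock, flags)  raw_spin_unlock_irqrestore((lock), (flags))
#define lock_fine_irqsave(lock, flags)         do { } while (0)
#define unlock_fine_irqrestore(lock, flags)    do { } while (0)
#define lock_fine(lock)                        do { } while (0)
#define unlock_fine(lock)                      do { } while (0)
#else
#define lock_global_irqsave(lock, flags)       do { } while (0)
#define unlock_global_irqrestore(lock, flags)  do { } while (0)
#define lock_fine_irqsave(lock, flags)         raw_spin_lock_irqsave((lock), (flags))
#define unlock_fine_irqrestore(lock, flags)    raw_spin_unlock_irqrestore((lock), (flags))
#define lock_fine(lock)                        raw_spin_lock(lock)
#define unlock_fine(lock)                      raw_spin_unlock(lock)
#endif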
@@ -835,6 +838,43 @@ void print_hp_waiters(struct binheap_node* n, int depth)
835 if(n->right) print_hp_waiters(n->right, depth+1); 838 if(n->right) print_hp_waiters(n->right, depth+1);
836} 839}
837 840
841void dump_node_data(struct binheap_node* parent, struct binheap_node* child)
842{
843 struct binheap_node *root = (parent != BINHEAP_POISON) ? parent : child;
844 struct binheap_node *bad_node = (parent == BINHEAP_POISON) ? parent : child;
845 struct nested_info *nest;
846
847 while(root->parent != NULL) {
848 root = root->parent;
849 }
850
851 if(parent == BINHEAP_POISON) {
852 TRACE_CUR("parent was bad node.\n");
853 }
854 else {
855 TRACE_CUR("child was bad node.\n");
856 }
857 TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right);
858
859 nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node);
860 TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1);
861
862 print_hp_waiters(root, 1);
863}
864
865void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node)
866{
867 struct binheap_node *root = handle->root;
868 struct nested_info *nest;
869
870 TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right);
871
872 nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node);
873 TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1);
874
875 print_hp_waiters(root, 1);
876}
877
838 878
839/* called with IRQs off */ 879/* called with IRQs off */
840/* preconditions: 880/* preconditions:
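dump_node_data() and dump_node_data2() are debugging aids: given a suspect binheap link they walk up to the root, print the offending node, and dump the whole heap of blocked tasks via print_hp_waiters(). They rely on binheap_entry() to map a binheap_node back to the nested_info that embeds it. A sketch of one plausible definition, assuming each node carries a back-pointer to its enclosing object in ->data (the same field the dump prints):

/* Sketch -- assumed accessor, analogous to list_entry()/container_of(),
 * except that the binary heap keeps an explicit back-pointer from the
 * node to the object that embeds it. */
#define binheap_entry(node, type, member)  ((type *)((node)->data))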
@@ -861,12 +901,12 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t
861 } 901 }
862 else { 902 else {
863 TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); 903 TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident);
864 raw_spin_unlock_irqrestore(to_unlock, irqflags); 904 unlock_fine_irqrestore(to_unlock, irqflags);
865 } 905 }
866 } 906 }
867 else { 907 else {
868 TRACE_TASK(t, "is not blocked. No propagation.\n"); 908 TRACE_TASK(t, "is not blocked. No propagation.\n");
869 raw_spin_unlock_irqrestore(to_unlock, irqflags); 909 unlock_fine_irqrestore(to_unlock, irqflags);
870 } 910 }
871} 911}
872 912
@@ -891,12 +931,12 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, struct t
891 } 931 }
892 else { 932 else {
893 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); 933 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock);
894 raw_spin_unlock_irqrestore(to_unlock, irqflags); 934 unlock_fine_irqrestore(to_unlock, irqflags);
895 } 935 }
896 } 936 }
897 else { 937 else {
898 TRACE_TASK(t, "is not blocked. No propagation.\n"); 938 TRACE_TASK(t, "is not blocked. No propagation.\n");
899 raw_spin_unlock_irqrestore(to_unlock, irqflags); 939 unlock_fine_irqrestore(to_unlock, irqflags);
900 } 940 }
901} 941}
902 942
@@ -930,16 +970,38 @@ static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock)
930struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, 970struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
931 struct task_struct* skip) 971 struct task_struct* skip)
932{ 972{
973 wait_queue_t *q;
933 struct list_head *pos; 974 struct list_head *pos;
934 struct task_struct *queued, *found = NULL; 975 struct task_struct *queued = NULL, *found = NULL;
976
977#ifdef CONFIG_LITMUS_DGL_SUPPORT
978 dgl_wait_state_t *dgl_wait = NULL;
979#endif
935 980
936 list_for_each(pos, &mutex->wait.task_list) { 981 list_for_each(pos, &mutex->wait.task_list) {
937 queued = (struct task_struct*) list_entry(pos, wait_queue_t, 982 q = list_entry(pos, wait_queue_t, task_list);
938 task_list)->private; 983
984#ifdef CONFIG_LITMUS_DGL_SUPPORT
985 if(q->func == dgl_wake_up) {
986 dgl_wait = (dgl_wait_state_t*) q->private;
987 if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) {
988 queued = dgl_wait->task;
989 }
990 else {
991 queued = NULL; // skip it.
992 }
993 }
994 else {
995 queued = (struct task_struct*) q->private;
996 }
997#else
998 queued = (struct task_struct*) q->private;
999#endif
939 1000
940 /* Compare task prios, find high prio task. */ 1001 /* Compare task prios, find high prio task. */
941 if (queued != skip && edf_higher_prio(queued, found)) 1002 if (queued && queued != skip && edf_higher_prio(queued, found)) {
942 found = queued; 1003 found = queued;
1004 }
943 } 1005 }
944 return found; 1006 return found;
945} 1007}
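With DGL support, a waiter on the mutex's wait queue is no longer always a bare task_struct: a group-lock request enqueues a dgl_wait_state_t instead, distinguished by its wake function being dgl_wake_up. init_dgl_waitqueue_entry() is called by the new dgl_lock path below but not defined in this file; the body here is an assumption consistent with the q->func / q->private checks above:

/* Sketch -- assumed initializer for a DGL wait-queue entry.  An ordinary
 * waiter keeps its task_struct in wq_node->private with the default wake
 * function; a DGL waiter stores its dgl_wait_state_t there and is tagged
 * with dgl_wake_up, which is how rsm_mutex_find_hp_waiter() (above) and
 * __waitqueue_dgl_remove_first() (below) tell the two kinds apart. */
static inline void init_dgl_waitqueue_entry(wait_queue_t *wq_node,
					    dgl_wait_state_t *dgl_wait)
{
	init_waitqueue_entry(wq_node, dgl_wait->task);
	wq_node->private = dgl_wait;
	wq_node->func = dgl_wake_up;
}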
@@ -951,6 +1013,136 @@ static inline struct task_struct* top_priority(struct binheap_handle* handle) {
951 return NULL; 1013 return NULL;
952} 1014}
953 1015
1016#ifdef CONFIG_LITMUS_DGL_SUPPORT
1017//static void gsnedf_rsm_mutex_reserve(struct litmus_lock *l, unsigned long *irqflags)
1018//{
1019// struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1020// raw_spin_lock_irqsave(&mutex->lock, *irqflags);
1021//}
1022//
1023//static void gsnedf_rsm_mutex_unreserve(struct litmus_lock *l, unsigned long irqflags)
1024//{
1025// struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1026// raw_spin_unlock_irqrestore(&mutex->lock, irqflags);
1027//}
1028
1029static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t)
1030{
1031 return(&dgl_lock);
1032}
1033
1034static int gsn_edf_rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
1035{
1036 struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1037 return(mutex->owner == t);
1038}
1039
1040
1041// return 1 if resource was immediately acquired.
1042// Assumes mutex->lock is held.
1043// Must set task state to TASK_UNINTERRUPTIBLE if task blocks.
1044static int gsn_edf_rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node)
1045{
1046 struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1047 struct task_struct *t = dgl_wait->task;
1048
1049 int acquired_immediately = 0;
1050
1051 BUG_ON(t != current);
1052
1053 if (mutex->owner) {
1054 TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident);
1055
1056 init_dgl_waitqueue_entry(wq_node, dgl_wait);
1057
1058 set_task_state(t, TASK_UNINTERRUPTIBLE);
1059 __add_wait_queue_tail_exclusive(&mutex->wait, wq_node);
1060 } else {
1061 TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);
1062
1063 /* it's ours now */
1064 mutex->owner = t;
1065
1066 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
1067 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
1068 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
1069
1070 acquired_immediately = 1;
1071 }
1072
1073 return acquired_immediately;
1074}
1075
1076// Assumes mutex->lock is held.
1077static void gsn_edf_rsm_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait)
1078{
1079 struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1080 struct task_struct *t = dgl_wait->task;
1081 struct task_struct *owner = mutex->owner;
1082 unsigned long flags = 0; // these are unused under DGL coarse-grain locking
1083
1084 BUG_ON(owner == t);
1085
1086 tsk_rt(t)->blocked_lock = l;
1087 mb();
1088
1089 if (edf_higher_prio(t, mutex->hp_waiter)) {
1090
1091 struct task_struct *old_max_eff_prio;
1092 struct task_struct *new_max_eff_prio;
1093 struct task_struct *new_prio = NULL;
1094
1095 if(mutex->hp_waiter)
1096 TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid);
1097 else
1098 TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
1099
1100 raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1101
1102 //TRACE_TASK(owner, "Heap Before:\n");
1103 //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0);
1104
1105 old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
1106
1107 mutex->hp_waiter = t;
1108 l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
1109
1110 binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
1111
1112 //TRACE_TASK(owner, "Heap After:\n");
1113 //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0);
1114
1115 new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
1116
1117 if(new_max_eff_prio != old_max_eff_prio) {
1118 TRACE_TASK(t, "is new hp_waiter.\n");
1119
1120 if ((effective_priority(owner) == old_max_eff_prio) ||
1121 (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
1122 new_prio = new_max_eff_prio;
1123 }
1124 }
1125 else {
1126 TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
1127 }
1128
1129 //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1130
1131 if(new_prio) {
1132 nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock.
1133 }
1134 else {
1135 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1136 unlock_fine_irqrestore(&mutex->lock, flags);
1137 }
1138 }
1139 else {
1140 TRACE_TASK(t, "no change in hp_waiter.\n");
1141 unlock_fine_irqrestore(&mutex->lock, flags);
1142 }
1143}
1144#endif
1145
954int gsnedf_rsm_mutex_lock(struct litmus_lock* l) 1146int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
955{ 1147{
956 struct task_struct *t = current; 1148 struct task_struct *t = current;
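The DGL entry points above use a dgl_wait_state_t to describe one task's outstanding group request. Only the task and nr_remaining fields are visible in this patch; the remaining fields below are assumptions about what such a descriptor would minimally need (the real definition lives in the locking headers):

/* Sketch -- assumed layout of a DGL request descriptor.  MAX_DGL_SIZE and
 * every field other than 'task' and 'nr_remaining' are guesses based on
 * how the descriptor is used in this file. */
typedef struct dgl_wait_state {
	struct task_struct *task;		/* task issuing the group request      */
	struct litmus_lock *locks[MAX_DGL_SIZE];/* resources requested together        */
	int size;				/* number of locks in the group        */
	int nr_remaining;			/* locks in the group not yet acquired */
	wait_queue_t wq_nodes[MAX_DGL_SIZE];	/* one wait-queue entry per lock       */
} dgl_wait_state_t;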
@@ -962,9 +1154,10 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
962 if (!is_realtime(t)) 1154 if (!is_realtime(t))
963 return -EPERM; 1155 return -EPERM;
964 1156
965 raw_spin_lock_irqsave(&mutex->lock, flags); 1157
966 //raw_spin_lock_irqsave(&rsm_global_lock, flags); 1158 lock_global_irqsave(&dgl_lock, flags);
967 1159 lock_fine_irqsave(&mutex->lock, flags);
1160
968 if (mutex->owner) { 1161 if (mutex->owner) {
969 TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); 1162 TRACE_TASK(t, "Blocking on lock %d.\n", l->ident);
970 1163
@@ -1023,29 +1216,24 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
1023 TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); 1216 TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
1024 } 1217 }
1025 1218
1026 //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1027
1028 if(new_prio) { 1219 if(new_prio) {
1029 nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. 1220 nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock.
1030 } 1221 }
1031 else { 1222 else {
1032 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 1223 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1033 raw_spin_unlock_irqrestore(&mutex->lock, flags); 1224 unlock_fine_irqrestore(&mutex->lock, flags);
1034 } 1225 }
1035
1036 } 1226 }
1037 else { 1227 else {
1038 TRACE_TASK(t, "no change in hp_waiter.\n"); 1228 TRACE_TASK(t, "no change in hp_waiter.\n");
1039 raw_spin_unlock_irqrestore(&mutex->lock, flags); 1229
1230 unlock_fine_irqrestore(&mutex->lock, flags);
1040 } 1231 }
1041 1232
1042 1233 unlock_global_irqrestore(&dgl_lock, flags);
1234
1043 TS_LOCK_SUSPEND; 1235 TS_LOCK_SUSPEND;
1044 1236
1045 /* release lock before sleeping */
1046 //raw_spin_unlock_irqrestore(&rsm_global_lock, flags);
1047 //raw_spin_unlock_irqrestore(&mutex->lock, flags);
1048
1049 /* We depend on the FIFO order. Thus, we don't need to recheck 1237 /* We depend on the FIFO order. Thus, we don't need to recheck
1050 * when we wake up; we are guaranteed to have the lock since 1238 * when we wake up; we are guaranteed to have the lock since
1051 * there is only one wake up per release. 1239 * there is only one wake up per release.
@@ -1072,32 +1260,56 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
1072 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); 1260 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
1073 raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); 1261 raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
1074 1262
1075 raw_spin_unlock_irqrestore(&mutex->lock, flags); 1263
1076 //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); 1264 unlock_fine_irqrestore(&mutex->lock, flags);
1265 unlock_global_irqrestore(&dgl_lock, flags);
1077 } 1266 }
1078 1267
1079 return 0; 1268 return 0;
1080} 1269}
1081 1270
1082 1271
1272#ifdef CONFIG_LITMUS_DGL_SUPPORT
1273void select_next_lock_if_primary(struct litmus_lock *l, dgl_wait_state_t *dgl_wait)
1274{
1275 if(tsk_rt(dgl_wait->task)->blocked_lock == l) {
1276 TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
1277 tsk_rt(dgl_wait->task)->blocked_lock = NULL;
1278 mb();
1279 select_next_lock(dgl_wait, l); // pick the next lock to be blocked on
1280 }
1281 else {
1282 TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
1283 }
1284}
1285#endif
1286
1287
1083int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) 1288int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
1084{ 1289{
1085 struct task_struct *t = current, *next; 1290 struct task_struct *t = current, *next = NULL;
1086 struct rsm_mutex *mutex = rsm_mutex_from_lock(l); 1291 struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1087 unsigned long flags; 1292 unsigned long flags;
1088 1293
1089 struct task_struct *old_max_eff_prio; 1294 struct task_struct *old_max_eff_prio;
1090 1295
1296 int wake_up_task = 1;
1297
1298#ifdef CONFIG_LITMUS_DGL_SUPPORT
1299 dgl_wait_state_t *dgl_wait = NULL;
1300#endif
1091 1301
1092 int err = 0; 1302 int err = 0;
1093 1303
1094 raw_spin_lock_irqsave(&mutex->lock, flags); 1304 lock_global_irqsave(&dgl_lock, flags);
1095 //raw_spin_lock_irqsave(&rsm_global_lock, flags); 1305 lock_fine_irqsave(&mutex->lock, flags);
1096 1306
1097 1307
1098 if (mutex->owner != t) { 1308 if (mutex->owner != t) {
1099 err = -EINVAL; 1309 err = -EINVAL;
1100 goto out; 1310 unlock_fine_irqrestore(&mutex->lock, flags);
1311 unlock_global_irqrestore(&dgl_lock, flags);
1312 return err;
1101 } 1313 }
1102 1314
1103 1315
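select_next_lock_if_primary() above clears blocked_lock and asks select_next_lock() to pick the next unacquired lock in the group for the task to block on. select_next_lock() itself is not part of this file; the following sketches the behaviour the call sites appear to assume, reusing the descriptor layout sketched earlier and the per-lock ops added at the bottom of this patch:

/* Sketch -- assumed behaviour of select_next_lock(): find a lock in the
 * group that the task does not yet own (other than the one just handled)
 * and make it the new "primary" lock, i.e. the lock recorded in
 * tsk_rt(t)->blocked_lock whose release will drive the next wake-up. */
static void select_next_lock(dgl_wait_state_t *dgl_wait, struct litmus_lock *prev)
{
	int i;

	for (i = 0; i < dgl_wait->size; ++i) {
		struct litmus_lock *l = dgl_wait->locks[i];

		if (l != prev && !l->ops->is_owner(l, dgl_wait->task)) {
			/* block on this lock next and start priority
			 * inheritance against its current owner */
			l->ops->enable_priority(l, dgl_wait);
			return;
		}
	}
	/* nothing left to wait for: the caller's nr_remaining bookkeeping
	 * decides whether the task is woken */
}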
@@ -1147,16 +1359,25 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
1147 1359
1148 1360
1149 /* check if there are jobs waiting for this resource */ 1361 /* check if there are jobs waiting for this resource */
1362#ifdef CONFIG_LITMUS_DGL_SUPPORT
1363 __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next);
1364 if(dgl_wait) {
1365 next = dgl_wait->task;
1366 //select_next_lock_if_primary(l, dgl_wait);
1367 }
1368#else
1150 next = __waitqueue_remove_first(&mutex->wait); 1369 next = __waitqueue_remove_first(&mutex->wait);
1370#endif
1151 if (next) { 1371 if (next) {
1152 /* next becomes the resource holder */ 1372 /* next becomes the resource holder */
1153 mutex->owner = next; 1373 mutex->owner = next;
1154 TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); 1374 TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
1155 1375
1156 1376// if(tsk_rt(next)->blocked_lock == &mutex->litmus_lock) { // might be false for DGL.
1157 tsk_rt(next)->blocked_lock = NULL; 1377// tsk_rt(next)->blocked_lock = NULL;
1378// mb();
1379// }
1158 1380
1159
1160 /* determine new hp_waiter if necessary */ 1381 /* determine new hp_waiter if necessary */
1161 if (next == mutex->hp_waiter) { 1382 if (next == mutex->hp_waiter) {
1162 1383
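__waitqueue_dgl_remove_first() is the DGL-aware counterpart of __waitqueue_remove_first(): it dequeues the head of the wait queue and reports it either as a plain task or as a group request. It is not defined in this hunk; a sketch consistent with how the unlock path uses it:

/* Sketch -- assumed helper: pop the first waiter and classify it by its
 * wake function.  Exactly one of *dgl_wait / *task is set on success. */
static void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
					 dgl_wait_state_t **dgl_wait,
					 struct task_struct **task)
{
	wait_queue_t *q;

	*dgl_wait = NULL;
	*task = NULL;

	if (waitqueue_active(wq)) {
		q = list_entry(wq->task_list.next, wait_queue_t, task_list);

		if (q->func == dgl_wake_up)
			*dgl_wait = (dgl_wait_state_t*) q->private;
		else
			*task = (struct task_struct*) q->private;

		__remove_wait_queue(wq, q);
	}
}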
@@ -1181,10 +1402,19 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
1181 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); 1402 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
1182 1403
1183 //TRACE_TASK(next, "Heap After:\n"); 1404 //TRACE_TASK(next, "Heap After:\n");
1184 //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); 1405 //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
1185 1406
1407#ifdef CONFIG_LITMUS_DGL_SUPPORT
1408 if(dgl_wait) {
1409 select_next_lock_if_primary(l, dgl_wait);
1410 //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining);
1411 --(dgl_wait->nr_remaining);
1412 wake_up_task = (dgl_wait->nr_remaining == 0);
1413 }
1414#endif
1186 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); 1415 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
1187 } else { 1416 }
1417 else {
1188 /* Well, if 'next' is not the highest-priority waiter, 1418 /* Well, if 'next' is not the highest-priority waiter,
1189 * then it (probably) ought to inherit the highest-priority 1419 * then it (probably) ought to inherit the highest-priority
1190 * waiter's priority. */ 1420 * waiter's priority. */
@@ -1198,6 +1428,16 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
1198 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, 1428 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks,
1199 struct nested_info, hp_binheap_node); 1429 struct nested_info, hp_binheap_node);
1200 1430
1431
1432#ifdef CONFIG_LITMUS_DGL_SUPPORT
1433 if(dgl_wait) {
1434 select_next_lock_if_primary(l, dgl_wait);
1435// wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining);
1436 --(dgl_wait->nr_remaining);
1437 wake_up_task = (dgl_wait->nr_remaining == 0);
1438 }
1439#endif
1440
1201 //TRACE_TASK(next, "Heap After:\n"); 1441 //TRACE_TASK(next, "Heap After:\n");
1202 //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); 1442 //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
1203 1443
@@ -1209,26 +1449,53 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
1209 * since the effective priority of hp_waiter can change (and the 1449 * since the effective priority of hp_waiter can change (and the
1210 * update has not made it to this lock).) 1450 * update has not made it to this lock).)
1211 */ 1451 */
1452#ifdef CONFIG_LITMUS_DGL_SUPPORT
1453 if((l->nest.hp_waiter_eff_prio != NULL) && (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio))
1454 {
1455 if(dgl_wait && tsk_rt(next)->blocked_lock) {
1456 BUG_ON(wake_up_task);
1457 if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) {
1458 nested_increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock.
1459 goto out; // all spinlocks are released. bail out now.
1460 }
1461 }
1462 else {
1463 increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio);
1464 }
1465 }
1466
1467 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
1468#else
1212 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) 1469 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio))
1213 { 1470 {
1214 increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); 1471 increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio);
1215 } 1472 }
1216
1217 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); 1473 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
1474#endif
1475 }
1476
1477 if(wake_up_task) {
1478 TRACE_TASK(next, "waking up since it is no longer blocked.\n");
1479
1480 tsk_rt(next)->blocked_lock = NULL;
1481 mb();
1482
1483 wake_up_process(next);
1484 }
1485 else {
1486 TRACE_TASK(next, "is still blocked.\n");
1218 } 1487 }
1219
1220 /* wake up next */
1221 wake_up_process(next);
1222 } 1488 }
1223 else { 1489 else {
1224 /* becomes available */ 1490 /* becomes available */
1225 mutex->owner = NULL; 1491 mutex->owner = NULL;
1226 } 1492 }
1227 1493
1494 unlock_fine_irqrestore(&mutex->lock, flags);
1495
1228out: 1496out:
1229 raw_spin_unlock_irqrestore(&mutex->lock, flags); 1497 unlock_global_irqrestore(&dgl_lock, flags);
1230 //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); 1498
1231
1232 return err; 1499 return err;
1233} 1500}
1234 1501
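The unlock path now only calls wake_up_process() once the woken task's whole group is available (nr_remaining reaches zero) and its blocked_lock has been cleared; a task that is handed a non-primary lock early stays asleep. Under that scheme the custom wake function mostly serves as a type tag on the wait-queue entry. A sketch of what dgl_wake_up() might look like under that assumption (it is not defined in this hunk):

/* Sketch -- dgl_wake_up() appears to be used chiefly as a marker compared
 * against q->func; the real wake-up decision is made by the unlock path,
 * which removes the entry itself and calls wake_up_process() only when
 * nr_remaining hits zero. */
static int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
{
	dgl_wait_state_t *dgl_wait = (dgl_wait_state_t *) wq_node->private;

	/* defensive fallback in case someone wakes through the queue */
	wake_up_process(dgl_wait->task);
	return 1;
}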
@@ -1241,8 +1508,8 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
1241 struct rsm_mutex *mutex = rsm_mutex_from_lock(l); 1508 struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1242 1509
1243 // relay-style locking 1510 // relay-style locking
1244 raw_spin_lock(&mutex->lock); 1511 lock_fine(&mutex->lock);
1245 raw_spin_unlock(to_unlock); 1512 unlock_fine(to_unlock);
1246 1513
1247 if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked 1514 if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked
1248 struct task_struct *owner = mutex->owner; 1515 struct task_struct *owner = mutex->owner;
@@ -1261,6 +1528,10 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
1261 if(t == mutex->hp_waiter) { 1528 if(t == mutex->hp_waiter) {
1262 // reflect the decreased priority in the heap node. 1529 // reflect the decreased priority in the heap node.
1263 l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); 1530 l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
1531
1532 BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node));
1533 BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks));
1534
1264 binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); 1535 binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
1265 } 1536 }
1266 1537
@@ -1280,13 +1551,13 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
1280 else { 1551 else {
1281 TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); 1552 TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid);
1282 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 1553 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1283 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1554 unlock_fine_irqrestore(&mutex->lock, irqflags);
1284 } 1555 }
1285 } 1556 }
1286 else { 1557 else {
1287 TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); 1558 TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n");
1288 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 1559 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1289 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1560 unlock_fine_irqrestore(&mutex->lock, irqflags);
1290 } 1561 }
1291 } 1562 }
1292 else { 1563 else {
@@ -1303,11 +1574,11 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
1303 } 1574 }
1304 else { 1575 else {
1305 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); 1576 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked);
1306 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1577 unlock_fine_irqrestore(&mutex->lock, irqflags);
1307 } 1578 }
1308 } 1579 }
1309 else { 1580 else {
1310 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1581 unlock_fine_irqrestore(&mutex->lock, irqflags);
1311 } 1582 }
1312 } 1583 }
1313} 1584}
@@ -1321,8 +1592,8 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
1321 struct rsm_mutex *mutex = rsm_mutex_from_lock(l); 1592 struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
1322 1593
1323 // relay-style locking 1594 // relay-style locking
1324 raw_spin_lock(&mutex->lock); 1595 lock_fine(&mutex->lock);
1325 raw_spin_unlock(to_unlock); 1596 unlock_fine(to_unlock);
1326 1597
1327 if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked 1598 if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked
1328 if(t == mutex->hp_waiter) { 1599 if(t == mutex->hp_waiter) {
@@ -1377,12 +1648,12 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
1377 } 1648 }
1378 else { 1649 else {
1379 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 1650 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1380 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1651 unlock_fine_irqrestore(&mutex->lock, irqflags);
1381 } 1652 }
1382 } 1653 }
1383 else { 1654 else {
1384 TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); 1655 TRACE_TASK(t, "is not hp_waiter. No propagation.\n");
1385 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1656 unlock_fine_irqrestore(&mutex->lock, irqflags);
1386 } 1657 }
1387 } 1658 }
1388 else { 1659 else {
@@ -1399,11 +1670,11 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
1399 } 1670 }
1400 else { 1671 else {
1401 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); 1672 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked);
1402 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1673 unlock_fine_irqrestore(&mutex->lock, irqflags);
1403 } 1674 }
1404 } 1675 }
1405 else { 1676 else {
1406 raw_spin_unlock_irqrestore(&mutex->lock, irqflags); 1677 unlock_fine_irqrestore(&mutex->lock, irqflags);
1407 } 1678 }
1408 } 1679 }
1409} 1680}
@@ -1418,14 +1689,15 @@ int gsnedf_rsm_mutex_close(struct litmus_lock* l)
1418 1689
1419 int owner; 1690 int owner;
1420 1691
1421 raw_spin_lock_irqsave(&mutex->lock, flags); 1692
1422 //raw_spin_lock_irqsave(&rsm_global_lock, flags); 1693 lock_global_irqsave(&dgl_lock, flags);
1694 lock_fine_irqsave(&mutex->lock, flags);
1423 1695
1424 owner = (mutex->owner == t); 1696 owner = (mutex->owner == t);
1425 1697
1426 raw_spin_unlock_irqrestore(&mutex->lock, flags); 1698 unlock_fine_irqrestore(&mutex->lock, flags);
1427 //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); 1699 unlock_global_irqrestore(&dgl_lock, flags);
1428 1700
1429 if (owner) 1701 if (owner)
1430 gsnedf_rsm_mutex_unlock(l); 1702 gsnedf_rsm_mutex_unlock(l);
1431 1703
@@ -1443,7 +1715,15 @@ static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = {
1443 .unlock = gsnedf_rsm_mutex_unlock, 1715 .unlock = gsnedf_rsm_mutex_unlock,
1444 .deallocate = gsnedf_rsm_mutex_free, 1716 .deallocate = gsnedf_rsm_mutex_free,
1445 .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, 1717 .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance,
1446 .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance 1718 .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance,
1719
1720#ifdef CONFIG_LITMUS_DGL_SUPPORT
1721// .reserve = gsnedf_rsm_mutex_reserve,
1722// .unreserve = gsnedf_rsm_mutex_unreserve,
1723 .dgl_lock = gsn_edf_rsm_mutex_dgl_lock,
1724 .is_owner = gsn_edf_rsm_mutex_is_owner,
1725 .enable_priority = gsn_edf_rsm_enable_priority,
1726#endif
1447}; 1727};
1448 1728
1449static struct litmus_lock* gsnedf_new_rsm_mutex(void) 1729static struct litmus_lock* gsnedf_new_rsm_mutex(void)
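The new ops wired up above extend struct litmus_lock_ops with DGL hooks. Their exact prototypes are not shown in this file; the fragment below sketches the signatures implied by gsn_edf_rsm_mutex_dgl_lock() and friends (the non-DGL members are elided):

/* Sketch -- assumed DGL-related members of struct litmus_lock_ops. */
struct litmus_lock_ops {
	/* ... open/close/lock/unlock/deallocate and the propagate_* hooks ... */

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	/* try to take one lock of a group; returns 1 if acquired immediately,
	 * otherwise enqueues the group request without suspending yet */
	int  (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t *dgl_wait,
			 wait_queue_t *wq_node);
	/* does this task currently hold the lock? */
	int  (*is_owner)(struct litmus_lock *l, struct task_struct *t);
	/* make this lock the task's primary blocker and start priority
	 * inheritance against its holder */
	void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t *dgl_wait);
#endif
};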
@@ -1928,7 +2208,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif
1928 TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", 2208 TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n",
1929 new_max_eff_prio->comm, new_max_eff_prio->pid); 2209 new_max_eff_prio->comm, new_max_eff_prio->pid);
1930 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 2210 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
1931 raw_spin_unlock_irqrestore(&sem->lock, flags); 2211 unlock_fine_irqrestore(&sem->lock, flags);
1932 } 2212 }
1933 } 2213 }
1934 else { 2214 else {
@@ -1936,12 +2216,12 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif
1936 fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); 2216 fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
1937 2217
1938 TRACE_TASK(t, "no owner??\n"); 2218 TRACE_TASK(t, "no owner??\n");
1939 raw_spin_unlock_irqrestore(&sem->lock, flags); 2219 unlock_fine_irqrestore(&sem->lock, flags);
1940 } 2220 }
1941 } 2221 }
1942 else { 2222 else {
1943 TRACE_TASK(t, "hp_waiter is unaffected.\n"); 2223 TRACE_TASK(t, "hp_waiter is unaffected.\n");
1944 raw_spin_unlock_irqrestore(&sem->lock, flags); 2224 unlock_fine_irqrestore(&sem->lock, flags);
1945 } 2225 }
1946} 2226}
1947 2227
@@ -1955,7 +2235,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg
1955 2235
1956 if(!owner) { 2236 if(!owner) {
1957 TRACE_CUR("No owner. Returning.\n"); 2237 TRACE_CUR("No owner. Returning.\n");
1958 raw_spin_unlock_irqrestore(&sem->lock, flags); 2238 unlock_fine_irqrestore(&sem->lock, flags);
1959 return; 2239 return;
1960 } 2240 }
1961 2241
@@ -2004,7 +2284,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg
2004 else { 2284 else {
2005 TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); 2285 TRACE_TASK(owner, "No need to propagate priority decrease forward.\n");
2006 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 2286 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
2007 raw_spin_unlock_irqrestore(&sem->lock, flags); 2287 unlock_fine_irqrestore(&sem->lock, flags);
2008 } 2288 }
2009} 2289}
2010 2290
@@ -2049,7 +2329,7 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo
2049 else { 2329 else {
2050 TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); 2330 TRACE_TASK(owner, "No need to propagate priority decrease forward.\n");
2051 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); 2331 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
2052 raw_spin_unlock_irqrestore(&sem->lock, flags); 2332 unlock_fine_irqrestore(&sem->lock, flags);
2053 } 2333 }
2054} 2334}
2055 2335
@@ -2103,7 +2383,7 @@ static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, st
2103 2383
2104 sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); 2384 sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue);
2105 2385
2106 raw_spin_unlock_irqrestore(&sem->lock, flags); 2386 unlock_fine_irqrestore(&sem->lock, flags);
2107} 2387}
2108 2388
2109 2389
@@ -2136,9 +2416,9 @@ static void __ikglp_enqueue_on_fq(
2136 } 2416 }
2137 // update donor eligibility list. 2417 // update donor eligibility list.
2138 if(likely(donee_heap_node)) { 2418 if(likely(donee_heap_node)) {
2139 if(binheap_is_in_heap(&donee_heap_node->node)) { 2419// if(binheap_is_in_heap(&donee_heap_node->node)) {
2140 WARN_ON(1); 2420// WARN_ON(1);
2141 } 2421// }
2142 ikglp_add_donees(sem, fq, t, donee_heap_node); 2422 ikglp_add_donees(sem, fq, t, donee_heap_node);
2143 } 2423 }
2144 2424
@@ -2353,7 +2633,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state
2353 TRACE_TASK(t, "No change in effective priority (it is %s/%d). BUG?\n", 2633 TRACE_TASK(t, "No change in effective priority (it is %s/%d). BUG?\n",
2354 new_max_eff_prio->comm, new_max_eff_prio->pid); 2634 new_max_eff_prio->comm, new_max_eff_prio->pid);
2355 raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); 2635 raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock);
2356 raw_spin_unlock_irqrestore(&sem->lock, flags); 2636 unlock_fine_irqrestore(&sem->lock, flags);
2357 } 2637 }
2358 2638
2359 2639
@@ -2366,7 +2646,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l)
2366{ 2646{
2367 struct task_struct* t = current; 2647 struct task_struct* t = current;
2368 struct ikglp_semaphore *sem = ikglp_from_lock(l); 2648 struct ikglp_semaphore *sem = ikglp_from_lock(l);
2369 unsigned long flags, real_flags; 2649 unsigned long flags = 0, real_flags;
2370 struct fifo_queue *fq = NULL; 2650 struct fifo_queue *fq = NULL;
2371 int replica = -EINVAL; 2651 int replica = -EINVAL;
2372 2652
@@ -2376,13 +2656,17 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l)
2376 return -EPERM; 2656 return -EPERM;
2377 2657
2378 raw_spin_lock_irqsave(&sem->real_lock, real_flags); 2658 raw_spin_lock_irqsave(&sem->real_lock, real_flags);
2379 raw_spin_lock_irqsave(&sem->lock, flags); 2659
2660 lock_global_irqsave(&dgl_lock, flags);
2661 lock_fine_irqsave(&sem->lock, flags);
2380 2662
2381 if(sem->shortest_fifo_queue->count == 0) { 2663 if(sem->shortest_fifo_queue->count == 0) {
2382 // take available resource 2664 // take available resource
2383 replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); 2665 replica = ikglp_get_idx(sem, sem->shortest_fifo_queue);
2384 2666
2385 ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock 2667 ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock
2668
2669 unlock_global_irqrestore(&dgl_lock, flags);
2386 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); 2670 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2387 } 2671 }
2388 else 2672 else
@@ -2410,7 +2694,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l)
2410 if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { 2694 if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) {
2411 // enqueue on PQ 2695 // enqueue on PQ
2412 ikglp_enqueue_on_pq(sem, &wait); 2696 ikglp_enqueue_on_pq(sem, &wait);
2413 raw_spin_unlock_irqrestore(&sem->lock, flags); 2697 unlock_fine_irqrestore(&sem->lock, flags);
2414 } 2698 }
2415 else { 2699 else {
2416 // enqueue as donor 2700 // enqueue as donor
@@ -2418,6 +2702,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l)
2418 } 2702 }
2419 } 2703 }
2420 2704
2705 unlock_global_irqrestore(&dgl_lock, flags);
2421 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); 2706 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2422 2707
2423 TS_LOCK_SUSPEND; 2708 TS_LOCK_SUSPEND;
@@ -2631,12 +2916,14 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2631 struct fifo_queue *to_steal = NULL; 2916 struct fifo_queue *to_steal = NULL;
2632 struct fifo_queue *fq; 2917 struct fifo_queue *fq;
2633 2918
2634 unsigned long flags, real_flags; 2919 unsigned long flags = 0, real_flags;
2635 2920
2636 int err = 0; 2921 int err = 0;
2637 2922
2638 raw_spin_lock_irqsave(&sem->real_lock, real_flags); 2923 raw_spin_lock_irqsave(&sem->real_lock, real_flags);
2639 raw_spin_lock_irqsave(&sem->lock, flags); 2924
2925 lock_global_irqsave(&dgl_lock, flags); // TODO: Push this deeper
2926 lock_fine_irqsave(&sem->lock, flags);
2640 2927
2641 fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. 2928 fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner.
2642 2929
@@ -2781,7 +3068,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2781 ikglp_get_idx(sem, other_fq)); 3068 ikglp_get_idx(sem, other_fq));
2782 3069
2783 ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); 3070 ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags);
2784 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! 3071 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
2785 } 3072 }
2786 else { 3073 else {
2787 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", 3074 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n",
@@ -2801,7 +3088,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2801 (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); 3088 (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1);
2802 3089
2803 ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. 3090 ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it.
2804 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! 3091 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
2805 } 3092 }
2806 } 3093 }
2807 } 3094 }
@@ -2810,7 +3097,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2810 ikglp_get_idx(sem, to_steal)); 3097 ikglp_get_idx(sem, to_steal));
2811 3098
2812 ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. 3099 ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it.
2813 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! 3100 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
2814 } 3101 }
2815 3102
2816 // check for new HP waiter. 3103 // check for new HP waiter.
@@ -2930,7 +3217,8 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2930 } 3217 }
2931 3218
2932out: 3219out:
2933 raw_spin_unlock_irqrestore(&sem->lock, flags); 3220 unlock_fine_irqrestore(&sem->lock, flags);
3221 unlock_global_irqrestore(&dgl_lock, flags);
2934 3222
2935 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); 3223 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2936 3224
@@ -2947,7 +3235,7 @@ static int gsnedf_ikglp_close(struct litmus_lock* l)
2947 int owner = 0; 3235 int owner = 0;
2948 int i; 3236 int i;
2949 3237
2950 raw_spin_lock_irqsave(&sem->lock, flags); 3238 raw_spin_lock_irqsave(&sem->real_lock, flags);
2951 3239
2952 for(i = 0; i < sem->nr_replicas; ++i) { 3240 for(i = 0; i < sem->nr_replicas; ++i) {
2953 if(sem->fifo_queues[i].owner == t) { 3241 if(sem->fifo_queues[i].owner == t) {
@@ -2956,7 +3244,7 @@ static int gsnedf_ikglp_close(struct litmus_lock* l)
2956 } 3244 }
2957 } 3245 }
2958 3246
2959 raw_spin_unlock_irqrestore(&sem->lock, flags); 3247 raw_spin_unlock_irqrestore(&sem->real_lock, flags);
2960 3248
2961 if (owner) 3249 if (owner)
2962 gsnedf_ikglp_unlock(l); 3250 gsnedf_ikglp_unlock(l);
@@ -3384,6 +3672,9 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
3384#ifdef CONFIG_LITMUS_LOCKING 3672#ifdef CONFIG_LITMUS_LOCKING
3385 .allocate_lock = gsnedf_allocate_lock, 3673 .allocate_lock = gsnedf_allocate_lock,
3386#endif 3674#endif
3675#ifdef CONFIG_LITMUS_DGL_SUPPORT
3676 .get_dgl_spinlock = gsn_edf_get_dgl_spinlock,
3677#endif
3387}; 3678};
3388 3679
3389 3680
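get_dgl_spinlock is the plugin hook through which the core locking layer obtains the scheduler's coarse DGL lock before acquiring a group of resources. The loop below is a sketch of how a group acquire could drive the per-lock ops added in this patch; it is not the actual litmus/locking.c implementation, and the descriptor fields follow the earlier sketch:

/* Sketch -- a plausible top-level group-lock path using the new hooks.
 * Error handling, tracing and the suspend/resume details are omitted. */
static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
{
	struct task_struct *t = dgl_wait->task;
	raw_spinlock_t *dgl = litmus->get_dgl_spinlock(t);
	unsigned long flags;
	int i;

	dgl_wait->nr_remaining = dgl_wait->size;

	raw_spin_lock_irqsave(dgl, flags);

	for (i = 0; i < dgl_wait->size; ++i) {
		struct litmus_lock *l = dgl_wait->locks[i];
		if (l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]))
			--dgl_wait->nr_remaining;	/* got it right away */
	}

	if (dgl_wait->nr_remaining == 0) {
		/* whole group acquired without blocking */
		raw_spin_unlock_irqrestore(dgl, flags);
	} else {
		/* block on one lock (the "primary"); the others are handed
		 * over by their unlock paths as they become free */
		select_next_lock(dgl_wait, NULL);
		raw_spin_unlock_irqrestore(dgl, flags);
		schedule();
	}
	return 0;
}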
@@ -3401,6 +3692,11 @@ static int __init init_gsn_edf(void)
3401 3692
3402 INIT_BINHEAP_NODE(&entry->hn); 3693 INIT_BINHEAP_NODE(&entry->hn);
3403 } 3694 }
3695
3696#ifdef CONFIG_LITMUS_DGL_SUPPORT
3697 raw_spin_lock_init(&dgl_lock);
3698#endif
3699
3404 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); 3700 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
3405 return register_sched_plugin(&gsn_edf_plugin); 3701 return register_sched_plugin(&gsn_edf_plugin);
3406} 3702}