author		Glenn Elliott <gelliott@cs.unc.edu>	2011-03-31 11:08:29 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2011-03-31 11:08:29 -0400
commit		53c37bbcc07707f88312efca136aa239f25d775c (patch)
tree		17f0adc8f3743f515d5829a37960d3eebc26b7a3
parent		c089cad00c670bf41edecf0aed688fe8d2f43125 (diff)
parent		0f6a8e02773f8c23b5b6a3dbfa044e50c9d7d811 (diff)

Merge remote branch 'origin/wip-fmlp-dequeue' into wip-k-fmlp

Conflicts:
	include/litmus/litmus.h
-rw-r--r--	include/litmus/litmus.h	|  4
-rw-r--r--	litmus/locking.c	| 19
-rw-r--r--	litmus/sched_gsn_edf.c	| 59
-rw-r--r--	litmus/sched_psn_edf.c	|  6

4 files changed, 28 insertions(+), 60 deletions(-)
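In short, this merge retires the peek-only waitqueue_first() and the peek-and-dequeue waitqueue_first_and_remove() in favor of a single __waitqueue_remove_first(), and moves the dequeue of a waiter from the woken task to the releasing task. Below is a minimal sketch of the resulting unlock-side pattern; the shape of struct fmlp_semaphore, the use of &sem->wait.lock, and the wake_up_process() call are assumptions modeled on the LITMUS^RT sources, not part of this diff.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Assumed shape of the semaphore, per sched_gsn_edf.c (simplified). */
struct fmlp_semaphore {
	struct task_struct *owner;
	wait_queue_head_t wait;
};

/* Sketch only: a simplified unlock path built around the new helper. */
static void example_unlock(struct fmlp_semaphore *sem)
{
	struct task_struct *next;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);

	/* Pop the head of the FIFO wait queue (NULL if empty); the
	 * waiter is dequeued here, so it need not remove itself. */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		sem->owner = next;	/* next becomes the resource holder */
		wake_up_process(next);	/* exactly one wake-up per release */
	} else {
		sem->owner = NULL;	/* no one waiting; resource is free */
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
}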
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index befdf6381693..3df242bf272f 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,8 +26,8 @@ static inline int in_list(struct list_head* list)
 	);
 }
 
-struct task_struct* waitqueue_first(wait_queue_head_t *wq);
-struct task_struct* waitqueue_first_and_remove(wait_queue_head_t *wq);
+
+struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
 
 #define NO_CPU 0xffffffff
 
diff --git a/litmus/locking.c b/litmus/locking.c
index e9e682adc2c9..cfce98e7480d 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -107,27 +107,14 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	return err;
 }
 
-struct task_struct* waitqueue_first(wait_queue_head_t *wq)
+struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 {
-	wait_queue_t *q;
+	wait_queue_t* q;
+	struct task_struct* t = NULL;
 
 	if (waitqueue_active(wq)) {
 		q = list_entry(wq->task_list.next,
 			       wait_queue_t, task_list);
-		return (struct task_struct*) q->private;
-	} else
-		return NULL;
-}
-
-struct task_struct* waitqueue_first_and_remove(wait_queue_head_t *wq)
-{
-	wait_queue_t *q;
-	struct task_struct* t = NULL;
-
-	if(waitqueue_active(wq))
-	{
-		q = list_entry(wq->task_list.next,
-			       wait_queue_t, task_list);
 		t = (struct task_struct*) q->private;
 		__remove_wait_queue(wq, q);
 	}
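The consolidation above also changes the waiter's side of the hand-off: since the releasing task now dequeues the next waiter itself, the woken task no longer re-acquires the queue lock just to call remove_wait_queue() on its own entry. A sketch of that slow path follows; the enqueue calls and the caller-held sem->wait.lock are assumptions modeled on the FMLP lock paths, of which this diff shows only fragments.

/* Sketch only: waiter side of the FIFO hand-off after this patch.
 * Assumed to be called with sem->wait.lock held and interrupts
 * disabled (saved in flags). */
static void example_lock_slowpath(struct fmlp_semaphore *sem,
				  unsigned long flags)
{
	struct task_struct *t = current;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, t);

	/* FIFO order matters: enqueue at the tail, then sleep. */
	set_task_state(t, TASK_UNINTERRUPTIBLE);
	__add_wait_queue_tail_exclusive(&sem->wait, &wait);

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	schedule();

	/* One wake-up per release, and the releaser already removed our
	 * wait entry and set ->owner; no dequeue or relock is needed. */
	BUG_ON(sem->owner != t);
}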
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ec641a094a08..bf8d989f1c4a 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -797,8 +797,6 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		 * ->owner. We can thus check it without acquiring the spin
 		 * lock. */
 		BUG_ON(sem->owner != t);
-
-		remove_wait_queue(&sem->wait, &wait);
 	} else {
 		/* it's ours now */
 		sem->owner = t;
@@ -824,7 +822,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 	}
 
 	/* check if there are jobs waiting for this resource */
-	next = waitqueue_first(&sem->wait);
+	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
 		/* next becomes the resouce holder */
 		sem->owner = next;
@@ -1040,6 +1038,7 @@ struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 
 	my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
 
+	/*
 	if(my_queue->hp_waiter)
 		TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
 			  kfmlp_get_idx(sem, my_queue),
@@ -1048,9 +1047,11 @@ struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 	else
 		TRACE_CUR("queue %d: new hp_waiter is %p\n",
 			  kfmlp_get_idx(sem, my_queue), NULL);
+	*/
 
 	raw_spin_lock(&gsnedf_lock);
 
+	/*
 	if(my_queue->owner)
 		TRACE_CUR("queue %d: owner is %s/%d\n",
 			  kfmlp_get_idx(sem, my_queue),
@@ -1060,19 +1061,13 @@ struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 		TRACE_CUR("queue %d: owner is %p\n",
 			  kfmlp_get_idx(sem, my_queue),
 			  NULL);
+	*/
 
 	if(tsk_rt(my_queue->owner)->inh_task == max_hp)
 	{
-		TRACE_CUR("queue %d: CRAZY: clearing the inheritance of %s/%d\n",
-			  kfmlp_get_idx(sem, my_queue), my_queue->owner->comm, my_queue->owner->pid);
-
 		__clear_priority_inheritance(my_queue->owner);
 		if(my_queue->hp_waiter != NULL)
 		{
-			TRACE_CUR("queue %d: CRAZY: setting the inheritance of %s/%d to %s/%d\n",
-				  kfmlp_get_idx(sem, my_queue),
-				  my_queue->owner->comm, my_queue->owner->pid,
-				  my_queue->hp_waiter->comm, my_queue->hp_waiter->pid);
 			__set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
 		}
 	}
@@ -1085,9 +1080,10 @@ struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 			/* Compare task prios, find high prio task. */
 			if (queued == max_hp)
 			{
+				/*
 				TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
 					  kfmlp_get_idx(sem, my_queue));
-
+				*/
 				__remove_wait_queue(&my_queue->wait,
 						    list_entry(pos, wait_queue_t, task_list));
 				break;
@@ -1144,42 +1140,30 @@ int gsnedf_kfmlp_lock(struct litmus_lock* l)
 
 		/* We depend on the FIFO order. Thus, we don't need to recheck
 		 * when we wake up; we are guaranteed to have the lock since
-		 * there is only one wake up per release.
+		 * there is only one wake up per release (or steal).
 		 */
-
 		schedule();
 
-		spin_lock_irqsave(&sem->lock, flags);
+
 		if(my_queue->owner == t)
 		{
 			TRACE_CUR("queue %d: acquired through waiting\n",
 				  kfmlp_get_idx(sem, my_queue));
-			//__remove_wait_queue(&my_queue->wait, &wait);
 		}
 		else
 		{
 			/* this case may happen if our wait entry was stolen
 			   between queues. */
-
-			struct kfmlp_queue* my_new_queue;
-
-			my_new_queue = kfmlp_get_queue(sem, t);
-			BUG_ON(!my_new_queue);
+			BUG_ON(!kfmlp_get_queue(sem, t));
 			TRACE_CUR("queue %d: acquired through stealing\n",
-				  kfmlp_get_idx(sem, my_new_queue));
+				  kfmlp_get_idx(sem, kfmlp_get_queue(sem, t)));
 		}
-		spin_unlock_irqrestore(&sem->lock, flags);
 	}
 	else
 	{
-		TRACE_CUR("queue %d: acquired immediatly\n",
+		TRACE_CUR("queue %d: acquired immediately\n",
 			  kfmlp_get_idx(sem, my_queue));
 
-		/* it's ours now */
-		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - immediate\n",
-			  kfmlp_get_idx(sem, my_queue),
-			  t->comm, t->pid);
-
 		my_queue->owner = t;
 
 		++(my_queue->count);
@@ -1209,14 +1193,13 @@ int gsnedf_kfmlp_unlock(struct litmus_lock* l)
 	}
 
 	/* check if there are jobs waiting for this resource */
-	//next = waitqueue_first(&my_queue->wait);
-	next = waitqueue_first_and_remove(&my_queue->wait);
+	next = __waitqueue_remove_first(&my_queue->wait);
 	if (next) {
-
+		/*
 		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
 			  kfmlp_get_idx(sem, my_queue),
 			  next->comm, next->pid);
-
+		*/
 		/* next becomes the resouce holder */
 		my_queue->owner = next;
 
@@ -1257,16 +1240,18 @@ int gsnedf_kfmlp_unlock(struct litmus_lock* l)
 
 		next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
 
+		/*
 		if(next)
 			TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
 				  kfmlp_get_idx(sem, my_queue),
 				  next->comm, next->pid);
+		*/
 
 		my_queue->owner = next;
 
 		if(next)
 		{
-			TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
+			TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
 				  kfmlp_get_idx(sem, my_queue),
 				  next->comm, next->pid);
 
@@ -1275,7 +1260,7 @@ int gsnedf_kfmlp_unlock(struct litmus_lock* l)
 		}
 		else
 		{
-			TRACE_CUR("queue %d: lock ownership passed to %p\n", kfmlp_get_idx(sem, my_queue), NULL);
+			TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
 
 			--(my_queue->count);
 			if(my_queue->count < sem->shortest_queue->count)
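Note that gsnedf_kfmlp_lock() now also skips re-taking sem->lock after schedule(): with FIFO queueing and exactly one wake-up per release (or steal), the woken task is guaranteed to own whichever queue it ended up in, so the check can be done locklessly. A condensed, comment-annotated view of that post-wakeup logic follows (helper names as used in the diff; their signatures are assumed):

	/* Sketch only: post-wakeup check in the k-FMLP lock slow path. */
	schedule();

	if (my_queue->owner == t) {
		/* Normal FIFO hand-off in the queue we originally joined. */
	} else {
		/* Our wait entry was stolen into another queue, but a steal
		 * still comes with exactly one wake-up, so by now we must
		 * own the queue we landed in; no lock is needed to check. */
		BUG_ON(!kfmlp_get_queue(sem, t));
	}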
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index abb06fa53e3a..71c02409efa2 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -442,10 +442,6 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 		 * ->owner. We can thus check it without acquiring the spin
 		 * lock. */
 		BUG_ON(sem->owner != t);
-
-		/* FIXME: could we punt the dequeuing to the previous job,
-		 * which is holding the spinlock anyway? */
-		remove_wait_queue(&sem->wait, &wait);
 	} else {
 		/* it's ours now */
 		sem->owner = t;
@@ -478,7 +474,7 @@ int psnedf_fmlp_unlock(struct litmus_lock* l)
 	unboost_priority(t);
 
 	/* check if there are jobs waiting for this resource */
-	next = waitqueue_first(&sem->wait);
+	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
 		/* boost next job */
 		boost_priority(next);