author	Glenn Elliott <gelliott@cs.unc.edu>	2013-02-11 19:24:10 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-02-11 19:24:10 -0500
commit	40d12009bd0c3515c5bfee5425bd80f58cdd7b73 (patch)
tree	08205dc1246c0283c213dbbb0714fe3baae4c397
parent	71d7f3404ed6d11497ead7d6e41a49188e094f74 (diff)
PRIOQ_MUTEX dgl progress
-rw-r--r--	include/litmus/locking.h	|  19
-rw-r--r--	include/litmus/rt_domain.h	|   8
-rw-r--r--	litmus/fifo_lock.c	|   9
-rw-r--r--	litmus/ikglp_lock.c	|   3
-rw-r--r--	litmus/kfmlp_lock.c	|   2
-rw-r--r--	litmus/locking.c	|  46
-rw-r--r--	litmus/prioq_lock.c	| 477
-rw-r--r--	litmus/sched_cedf.c	|   4
8 files changed, 348 insertions, 220 deletions
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index cbc99ee54020..b1024e397f67 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -74,9 +74,24 @@ int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
 struct task_struct* __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait);
 
 int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait);
-#endif
 
 
+static inline struct task_struct* get_queued_task_and_dgl_wait(wait_queue_t* q, dgl_wait_state_t** dgl_wait_ptr)
+{
+	struct task_struct *queued;
+
+	if(q->func == dgl_wake_up) {
+		*dgl_wait_ptr = (dgl_wait_state_t*) q->private;
+		queued = (*dgl_wait_ptr)->task;
+	}
+	else {
+		*dgl_wait_ptr = NULL;
+		queued = (struct task_struct*) q->private;
+	}
+
+	return queued;
+}
+#endif
 
 
 static inline struct task_struct* get_queued_task(wait_queue_t* q)
@@ -206,7 +221,7 @@ struct litmus_lock_ops {
 
 
 void suspend_for_lock(void);
-
+int wake_up_for_lock(struct task_struct* t);
 
 #endif
 
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 89a8850ecf9d..6d3c4672dca9 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -88,12 +88,12 @@ static inline struct task_struct* __take_ready(rt_domain_t* rt)
 	if (hn) {
 		struct task_struct* taken = bheap2task(hn);
 		TRACE("rt: taking %s/%d [inh_task: %s/%d]\n",
-			  (taken) ? taken->comm : "nil",
-			  (taken) ? taken->pid : -1,
+			  (taken) ? taken->comm : "null",
+			  (taken) ? taken->pid : 0,
 			  (taken && tsk_rt(taken)->inh_task) ?
-				tsk_rt(taken)->inh_task->comm : "nil",
+				tsk_rt(taken)->inh_task->comm : "null",
 			  (taken && tsk_rt(taken)->inh_task) ?
-				tsk_rt(taken)->inh_task->pid : -1);
+				tsk_rt(taken)->inh_task->pid : 0);
 
 		return taken;
 	}
diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c
index cc3e1ab5a965..dfe56bface6d 100644
--- a/litmus/fifo_lock.c
+++ b/litmus/fifo_lock.c
@@ -486,14 +486,7 @@ int fifo_mutex_unlock(struct litmus_lock* l)
 		tsk_rt(next)->blocked_lock = NULL;
 		mb();
 
-#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
-		// re-enable tracking
-		if(tsk_rt(next)->held_gpus) {
-			tsk_rt(next)->suspend_gpu_tracker_on_block = 0;
-		}
-#endif
-
-		wake_up_process(next);
+		wake_up_for_lock(next);
 	}
 	else {
 		TRACE_TASK(next, "is still blocked.\n");
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 59d7ee4f9fd1..3d79e41b42df 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -1598,9 +1598,8 @@ wake_kludge:
 		raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
 	}
 
-
 	// wake up the new resource holder!
-	wake_up_process(next);
+	wake_up_for_lock(next);
 	}
 	if(fq_of_new_on_fq && fq_of_new_on_fq != fq && fq_of_new_on_fq->count == 1) {
 		// The guy we promoted when to an empty FQ. (Why didn't stealing pick this up?)
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index 377e5a8f7456..041561839976 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -391,7 +391,7 @@ RETRY:
 		}
 
 		/* wake up next */
-		wake_up_process(next);
+		wake_up_for_lock(next);
 	}
 	else {
 		// TODO: put this stealing logic before we attempt to release
diff --git a/litmus/locking.c b/litmus/locking.c
index 0b5e162c0c02..4b8382cd77d1 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -365,6 +365,18 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[],
 #endif
 
 
+static int failed_owner(struct litmus_lock *cur_lock, struct task_struct *t)
+{
+	struct task_struct *cur_owner = cur_lock->ops->get_owner(cur_lock);
+	printk(KERN_EMERG "lock %d expected owner %s/%d but got %s/%d.\n",
+			cur_lock->ident,
+			(t) ? t->comm : "null",
+			(t) ? t->pid : 0,
+			(cur_owner) ? cur_owner->comm : "null",
+			(cur_owner) ? cur_owner->pid : 0);
+	BUG();
+}
+
 /* only valid when locks are prioq locks!!!
  * THE BIG DGL LOCK MUST BE HELD! */
 int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait)
@@ -386,7 +398,9 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
 
 		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
 
-		BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
+		if(!(l->ops->is_owner(l, dgl_wait->task)))
+			failed_owner(l, dgl_wait->task);
+		//BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
 	}
 
 	return 0; /* success */
@@ -564,7 +578,9 @@ all_acquired:
 	// SANITY CHECK FOR TESTING
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+		if(!(l->ops->is_owner(l, dgl_wait->task)))
+			failed_owner(l, dgl_wait->task);
+		//BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
 	}
 
 	TRACE_CUR("Acquired entire DGL\n");
@@ -830,8 +846,25 @@ void suspend_for_lock(void)
 	}
 #endif
 
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+	// disable tracking
+	if(tsk_rt(t)->held_gpus) {
+		tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
+	}
+#endif
+
 	schedule();
 
+
+	/* TODO: Move the following to wake_up_for_lock()? */
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+	// re-enable tracking
+	if(tsk_rt(t)->held_gpus) {
+		tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
+	}
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 	if (gpu_restore) {
 		/* restore our state */
@@ -847,6 +880,15 @@ void suspend_for_lock(void)
 #endif
 }
 
+int wake_up_for_lock(struct task_struct* t)
+{
+	int ret;
+
+	ret = wake_up_process(t);
+
+	return ret;
+}
+
 
 #else // CONFIG_LITMUS_LOCKING
 
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index ff6419ba1a13..faf8c15df542 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -128,6 +128,7 @@ static inline void __add_wait_queue_sorted_exclusive(wait_queue_head_t *q, wait_
 	__add_wait_queue_sorted(q, wait);
 }
 
+
 static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
 {
 	wait_queue_t *q;
@@ -148,188 +149,310 @@ static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *
 	BUG();
 }
 
-
+#ifndef CONFIG_LITMUS_DGL_SUPPORT
 static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t)
 {
+	/* TODO: Make this efficient instead of remove/add */
+	__prioq_increase_pos(mutex, t);
+}
+#endif
+
+
+/* caller is responsible for locking */
+static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex,
+						struct task_struct* skip)
+{
 	wait_queue_t *q;
 	struct list_head *pos;
-	struct task_struct *queued;
+	struct task_struct *queued = NULL, *found = NULL;
 
-	/* TODO: Make this efficient instead of remove/add */
+	/* list in sorted order. higher-prio tasks likely at the front. */
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
 		queued = get_queued_task(q);
-		if (queued == t) {
-			__remove_wait_queue(&mutex->wait, q);
-			__add_wait_queue_sorted(&mutex->wait, q);
-			return;
+
+		/* Compare task prios, find high prio task. */
+		if (queued &&
+			(queued != skip) &&
+			(tsk_rt(queued)->blocked_lock == &mutex->litmus_lock) &&
+			litmus->compare(queued, found)) {
+			found = queued;
 		}
 	}
-
-	BUG();
+	return found;
 }
 
+
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
-static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
-{
-// TODO:
-// (1) Increase position for 't' in all of it's DGLs.
-// (2) Check to see if 't' can take the DGLs atomically
-// (3) If it can take the DGLs, do so.
-// (4) Cleanup?
-// (5) Wake up 't'
 
+static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struct task_struct *t, dgl_wait_state_t *dgl_wait)
+{
+	/* Any task that acquires a PRIOQ mutex via inheritance does not inheritance
+	 * does not inherit priority from the hp_waiter, by defintion of the
+	 * priority queue. */
+
+	struct litmus_lock *l;
+	BUG_ON(mutex->owner != NULL);
+	BUG_ON(list_empty(&mutex->wait.task_list));
+
+	l = &mutex->litmus_lock;
+
+	if (dgl_wait) {
+		BUG_ON(t != dgl_wait->task);
+
+		/* we're a part of a DGL */
+		if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
+			TRACE_CUR("%s/%d cannot take entire DGL via inheritance.\n",
+					t->comm, t->pid);
+			/* it can't take the lock. nullify 't'. */
+			t = NULL;
+		}
+		else {
+			TRACE_CUR("%s/%d can take its entire DGL atomically via inheritance!\n",
+					t->comm, t->pid);
+			/* __attempt_atomic_dgl_acquire() already cleaned up the state of acquired locks */
+		}
+	}
+	else {
+		/* we're a regular singular request. we can always take the lock if
+		 * there is no mutex owner. */
+		wait_queue_t *first;
+
+		TRACE_CUR("%s/%d can take it's singular lock via inheritance!\n",
+				t->comm, t->pid);
+
+		first = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
+
+		BUG_ON(get_queued_task(first) != t);
+
+		__remove_wait_queue(&mutex->wait, first); /* remove the blocked task */
+
+		/* update/cleanup the state of the lock */
+
+		mutex->owner = t; /* take ownership!!! */
+
+		mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t);
+		l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
+			effective_priority(mutex->hp_waiter) : NULL;
+
+		if (mutex->hp_waiter)
+			TRACE_CUR("%s/%d is new highest-prio waiter\n",
+					mutex->hp_waiter->comm, mutex->hp_waiter->pid);
+		else
+			TRACE_CUR("no further waiters\n");
+
+		raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
+
+		binheap_add(&l->nest.hp_binheap_node,
+				&tsk_rt(t)->hp_blocked_tasks,
+				struct nested_info, hp_binheap_node);
+
+		raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
+	}
+
+	if (t) {
+		BUG_ON(mutex->owner != t);
+
+		TRACE_CUR("%s/%d waking up since it is no longer blocked.\n", t->comm, t->pid);
+
+		tsk_rt(t)->blocked_lock = NULL;
+		mb();
+
+		wake_up_for_lock(t);
+	}
+
+	return (t != NULL);
+}
 
-	wait_queue_t *q;
-	struct list_head *pos;
-	struct task_struct *queued;
-	int i;
-	int ret = 0;
+static inline struct task_struct* get_head_task(struct prioq_mutex *mutex)
+{
+	wait_queue_t *q_node = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
+	return get_queued_task(q_node);
+}
 
+static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
+{
+// (1) Increase position for 't' for all locks it is waiting.
+// (2) Check to see if 't' can take the lock, DGL or singular lock.
+// (3) If it can, do so and wake up 't'.
+
+	struct list_head *pos;
+	struct task_struct *new_head;
+	struct task_struct *cur_head = NULL;
+	dgl_wait_state_t *dgl_wait = NULL;
+	int woke_up = 0;
+	int found = 0;
+
+
+	BUG_ON(list_empty(&mutex->wait.task_list));
+
+	/* note the task at the head of the queue */
+	if(mutex->owner == NULL) {
+		cur_head = get_head_task(mutex);
+	}
+
 	list_for_each(pos, &mutex->wait.task_list) {
-		q = list_entry(pos, wait_queue_t, task_list);
-		if(q->func == dgl_wake_up) {
-			// we're looking at a dgl request
-			dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private;
-			queued = dgl_wait->task;
+		dgl_wait_state_t *temp_dgl_state;
+		wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
+		struct task_struct *queued = get_queued_task_and_dgl_wait(q, &temp_dgl_state);
+
+		if (queued == t) {
 
-			if (queued == t) // is it the one we're looking for?
-			{
+			TRACE_CUR("found %s/%d in prioq of lock %d\n",
+					t->comm, t->pid,
+					mutex->litmus_lock.ident);
+
+			if(temp_dgl_state) { /* it's a DGL request */
+				int i;
+				dgl_wait = temp_dgl_state;
+
+				TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n",
+						t->comm, t->pid, dgl_wait->size);
+
 				// reposition on the other mutexes
 				for(i = 0; i < dgl_wait->size; ++i) {
+					// assume they're all PRIOQ_MUTEX
 					struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i];
 					if (pm != mutex)
 						__prioq_increase_pos(pm, t);
 				}
-				// reposition on this mutex
-				__remove_wait_queue(&mutex->wait, q);
-				__add_wait_queue_sorted(&mutex->wait, q);
-
-
-				if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
-					/* it can't take the lock. do nothing. */
-				}
-				else {
-					TRACE_CUR("%s/%d can take its entire DGL atomically via inheritance!\n",
-							dgl_wait->task->comm, dgl_wait->task->pid);
-
-					/* we took the lock! we've already been removed from mutex->wait.task_list */
-
-					TRACE_TASK(t, "waking up since it is no longer blocked.\n");
-
-					tsk_rt(t)->blocked_lock = NULL;
-					mb();
-
-#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
-					// re-enable tracking
-					if(tsk_rt(t)->held_gpus) {
-						tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
-					}
-#endif
-					wake_up_process(t);
-					ret = 1;
-				}
-				break;
 			}
+
+			// reposition on this mutex
+			__remove_wait_queue(&mutex->wait, q);
+			__add_wait_queue_sorted(&mutex->wait, q);
+			found = 1;
+			break;
 		}
-		else {
-			// not dgl request.
-			queued = (struct task_struct*) q->private;
-			if (queued == t) { // is this the one we're looking for?
-				// if so, reposition it.
-				__remove_wait_queue(&mutex->wait, q);
-				__add_wait_queue_sorted(&mutex->wait, q);
+	}
+
+	BUG_ON(!found);
+
+	if (mutex->owner == NULL) {
+		/* who is the new head? */
+		new_head = get_head_task(mutex);
+
+		/* is the prioq mutex idle? */
+		if(cur_head != new_head) {
+			/* the new head might be able to take the lock */
+
+			BUG_ON(new_head != t); /* the new head must be this task since our prio increased */
+
+			TRACE_CUR("Change in prioq head on idle prioq mutex %d: old = %s/%d new = %s/%d\n",
+					mutex->litmus_lock.ident,
+					cur_head->comm, cur_head->pid,
+					new_head->comm, new_head->pid);
+
+			woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, t, dgl_wait);
+		}
+	}
+
+	return woke_up;
+}
+
+static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex, struct task_struct *t, wait_queue_t *q)
+{
+	struct list_head *pos;
+	struct task_struct *new_head;
+	struct task_struct *cur_head = NULL;
+	int woke_up = 0;
+	int found = 1;
+
+	BUG_ON(list_empty(&mutex->wait.task_list));
+
+	/* find the position of t in mutex's wait q if it's not provided */
+	if (q == NULL) {
+		found = 0;
+		list_for_each(pos, &mutex->wait.task_list) {
+			q = list_entry(pos, wait_queue_t, task_list);
+			if (t == get_queued_task(q)) {
+				found = 1;
 				break;
 			}
 		}
 	}
 
-	return ret;
+	BUG_ON(!q);
+	BUG_ON(!found);
+
+	if(mutex->owner == NULL) {
+		cur_head = get_head_task(mutex);
+	}
+
+	// update the position
+	__remove_wait_queue(&mutex->wait, q);
+	__add_wait_queue_sorted(&mutex->wait, q);
+
+	if(mutex->owner == NULL) {
+		// get a reference to dgl_wait of the new head is a DGL request
+		dgl_wait_state_t *dgl_wait;
+		q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
+		new_head = get_queued_task_and_dgl_wait(q, &dgl_wait);
+
+		/* is the prioq mutex idle and did the head change? */
+		if(cur_head != new_head) {
+			/* the new head might be able to take the lock */
+			TRACE_CUR("Change in prioq head on idle prioq mutex %d: old = %s/%d new = %s/%d\n",
+					mutex->litmus_lock.ident,
+					cur_head->comm, cur_head->pid,
+					new_head->comm, new_head->pid);
+
+			woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, new_head, dgl_wait);
+		}
+	}
+	return woke_up;
 }
 
 static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t)
 {
-	// TODO:
-	// (1) Increase position for 't' in all of it's DGLs.
-	// (2) Check to see if 't' can take the DGLs atomically
-	// (3) If it can take the DGLs, do so.
-	// (4) Cleanup?
-	// (5) Wake up 't'
-
-
-	wait_queue_t *q;
+// (1) Decrease position for 't' for all locks it is waiting.
+// (2) For every lock upon which 't' was the head AND that lock is idle:
+// (3) Can the new head take the lock?
+// (4) If it can, do so and wake up the new head.
+
 	struct list_head *pos;
-	struct task_struct *queued;
-	int i;
+
+	BUG_ON(list_empty(&mutex->wait.task_list));
 
 	list_for_each(pos, &mutex->wait.task_list) {
-		q = list_entry(pos, wait_queue_t, task_list);
-		if(q->func == dgl_wake_up) {
-			// we're looking at a dgl request
-			dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private;
-			queued = dgl_wait->task;
-
-			if (queued == t) // is it the one we're looking for?
-			{
-				// reposition on the other mutexes
+		dgl_wait_state_t *dgl_wait;
+		wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
+		struct task_struct *queued = get_queued_task_and_dgl_wait(q, &dgl_wait);
+
+		if (queued == t) {
+			TRACE_CUR("found %s/%d in prioq of lock %d\n",
+					t->comm, t->pid,
+					mutex->litmus_lock.ident);
+
+			if (dgl_wait) {
+				// reposition on all mutexes and check for wakeup
+				int i;
+
+				TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n",
+						t->comm, t->pid, dgl_wait->size);
+
 				for(i = 0; i < dgl_wait->size; ++i) {
-					struct prioq_mutex *pm = (struct prioq_mutex *)dgl_wait->locks[i];
+					// assume they're all PRIOQ_MUTEX
+					struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i];
 					if (pm != mutex)
-						__prioq_decrease_pos(pm, t);
+						___prioq_dgl_decrease_pos_and_check_acquire(pm, t, NULL);
+					else
+						___prioq_dgl_decrease_pos_and_check_acquire(pm, t, q);
 				}
-				// reposition on this mutex
-				__remove_wait_queue(&mutex->wait, q);
-				__add_wait_queue_sorted(&mutex->wait, q);
-				return;
 			}
-		}
-		else {
-			// not dgl request.
-			queued = (struct task_struct*) q->private;
-			if (queued == t) { // is this the one we're looking for?
-				// if so, reposition it.
-				__remove_wait_queue(&mutex->wait, q);
-				__add_wait_queue_sorted(&mutex->wait, q);
-				return;
+			else {
+				___prioq_dgl_decrease_pos_and_check_acquire(mutex, t, q);
 			}
+			return;
 		}
 	}
 
 	BUG();
 }
-#endif
 
 
 
-
-
-/* caller is responsible for locking */
-static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex,
-						struct task_struct* skip)
-{
-	wait_queue_t *q;
-	struct list_head *pos;
-	struct task_struct *queued = NULL, *found = NULL;
-
-	/* list in sorted order. higher-prio tasks likely at the front. */
-	list_for_each(pos, &mutex->wait.task_list) {
-		q = list_entry(pos, wait_queue_t, task_list);
-		queued = get_queued_task(q);
-
-		/* Compare task prios, find high prio task. */
-		if (queued &&
-			(queued != skip) &&
-			(tsk_rt(queued)->blocked_lock == &mutex->litmus_lock) &&
-			litmus->compare(queued, found)) {
-			found = queued;
-		}
-	}
-	return found;
-}
-
-
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-
 int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
 {
 	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
@@ -474,10 +597,12 @@ static void select_next_lock_if_primary(struct litmus_lock *l,
 			effective_priority(mutex->hp_waiter) :
 			NULL;
 
+
 		if (mutex->hp_waiter)
-			TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+			TRACE_CUR("%s/%d is new highest-prio waiter\n",
+					mutex->hp_waiter->comm, mutex->hp_waiter->pid);
 		else
-			TRACE("no further waiters\n");
+			TRACE_CUR("no further waiters\n");
 	}
 
 	select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on
@@ -487,29 +612,6 @@ static void select_next_lock_if_primary(struct litmus_lock *l,
 			l->ident, t->comm, t->pid);
 	}
 }
-#endif
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
 
 int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t)
 {
@@ -569,28 +671,6 @@ void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_l
 
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 int prioq_mutex_lock(struct litmus_lock* l)
 {
 	struct task_struct *t = current;
@@ -819,12 +899,12 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 	 * DGL waiter can't acquire all locks at once.
 	 */
 		wait_queue_t *q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
-		if(q->func == dgl_wake_up) {
-			dgl_wait = (dgl_wait_state_t*) q->private;
-
+		get_queued_task_and_dgl_wait(q, &dgl_wait);
+
+		if (dgl_wait) {
 			TRACE_CUR("Checking to see if DGL waiter %s/%d can take its locks\n",
 					dgl_wait->task->comm, dgl_wait->task->pid);
 
 			if(__attempt_atomic_dgl_acquire(l, dgl_wait)) {
 				/* failed. can't take this lock yet. we remain at head of prioq
 				 * allow hp requests in the future to go ahead of us. */
@@ -839,6 +919,8 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 
 		/* remove the first */
 		next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait);
+
+		BUG_ON(dgl_wait && (next != dgl_wait->task));
 	}
 #else
 	/* check if there are jobs waiting for this resource */
@@ -852,7 +934,8 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 		/* determine new hp_waiter if necessary */
 		if (next == mutex->hp_waiter) {
 
-			TRACE_TASK(next, "was highest-prio waiter\n");
+			TRACE_CUR("%s/%d was highest-prio waiter\n", next->comm, next->pid);
+
 			/* next has the highest priority --- it doesn't need to
 			 * inherit. However, we need to make sure that the
 			 * next-highest priority in the queue is reflected in
@@ -862,10 +945,13 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 				effective_priority(mutex->hp_waiter) :
 				NULL;
 
+
 			if (mutex->hp_waiter)
-				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+				TRACE_CUR("%s/%d is new highest-prio waiter\n",
+						mutex->hp_waiter->comm, mutex->hp_waiter->pid);
 			else
-				TRACE("no further waiters\n");
+				TRACE_CUR("no further waiters\n");
+
 
 			raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
 
@@ -879,7 +965,7 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 		/* Well, if 'next' is not the highest-priority waiter,
 		 * then it (probably) ought to inherit the highest-priority
 		 * waiter's priority. */
-		TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident);
+		TRACE_CUR("%s/%d is not hp_waiter of lock %d.\n", next->comm, next->pid, l->ident);
 
 		raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
 
@@ -920,19 +1006,12 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 #endif
 	}
 
-		TRACE_TASK(next, "waking up since it is no longer blocked.\n");
+		TRACE_CUR("%s/%d waking up since it is no longer blocked.\n", next->comm, next->pid);
 
 		tsk_rt(next)->blocked_lock = NULL;
 		mb();
 
-#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
-		// re-enable tracking
-		if(tsk_rt(next)->held_gpus) {
-			tsk_rt(next)->suspend_gpu_tracker_on_block = 0;
-		}
-#endif
-
-		wake_up_process(next);
+		wake_up_for_lock(next);
 	}
 
 	unlock_fine_irqrestore(&mutex->lock, flags);
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 9b6d25f11c93..40daf8e16d74 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1479,13 +1479,13 @@ static void nested_increase_priority_inheritance(struct task_struct* t,
 							irqflags);
 		}
 		else {
-			TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n",
+			TRACE_TASK(t, "Inheritor is blocked on litmus lock (%d) that does not support nesting!\n",
 					blocked_lock->ident);
 			unlock_fine_irqrestore(to_unlock, irqflags);
 		}
 	}
 	else {
-		TRACE_TASK(t, "is not blocked. No propagation.\n");
+		TRACE_TASK(t, "is not blocked on litmus lock. No propagation.\n");
 		unlock_fine_irqrestore(to_unlock, irqflags);
 	}
 }