path: root/litmus/locking.c
Diffstat (limited to 'litmus/locking.c')
-rw-r--r--	litmus/locking.c	181
1 file changed, 160 insertions, 21 deletions
diff --git a/litmus/locking.c b/litmus/locking.c
index 4fe572c28aea..0b5e162c0c02 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -234,8 +234,13 @@ void print_hp_waiters(struct binheap_node* n, int depth)
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 
+struct prioq_mutex;
+
 void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/)
 {
+	int start = dgl_wait->last_primary;
+	extern void __dump_prioq_lock_info(struct prioq_mutex *mutex);
+
 	/*
 	 We pick the next lock in reverse order. This causes inheritance propagation
 	 from locks received earlier to flow in the same direction as regular nested
@@ -244,30 +249,56 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lo
 
 	BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock);
 
-	//WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
+	// note reverse order
+	for(dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1;
+		dgl_wait->last_primary != start;
+		dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1)
+	{
+
+		struct litmus_lock *l = dgl_wait->locks[dgl_wait->last_primary];
+
+		if(!l->ops->is_owner(l, dgl_wait->task) &&
+		   l->ops->get_owner(l)) {
+
+			tsk_rt(dgl_wait->task)->blocked_lock =
+				dgl_wait->locks[dgl_wait->last_primary];
+			mb();
+
+			TRACE_TASK(dgl_wait->task, "New blocked lock is %d\n", l->ident);
+
+			l->ops->enable_priority(l, dgl_wait);
 
-	if (dgl_wait->last_primary == 0) {
-		/* loop around */
-		dgl_wait->last_primary = dgl_wait->size;
+			return;
+		}
 	}
 
+	// There was no one to push on. This can happen if the blocked task is
+	// behind a task that is idling a prioq-mutex.
+
 	// note reverse order
-	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
-		dgl_wait->last_primary >= 0;
-		--(dgl_wait->last_primary)){
-		if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(
-			dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) {
+	dgl_wait->last_primary = start;
+	for(dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1;
+		dgl_wait->last_primary != start;
+		dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1)
+	{
+
+		struct litmus_lock *l = dgl_wait->locks[dgl_wait->last_primary];
+
+		if(!l->ops->is_owner(l, dgl_wait->task)) {
 
 			tsk_rt(dgl_wait->task)->blocked_lock =
 				dgl_wait->locks[dgl_wait->last_primary];
 			mb();
 
-			TRACE_CUR("New blocked lock is %d\n",
-				dgl_wait->locks[dgl_wait->last_primary]->ident);
+			TRACE_TASK(dgl_wait->task, "New blocked lock is %d\n", l->ident);
 
-			break;
+			l->ops->enable_priority(l, dgl_wait);
+
+			return;
 		}
 	}
+
+	BUG();
 }
 
 int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
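The heart of the reworked select_next_lock() is a wrap-around reverse scan over the DGL request: start just before last_primary, decrement with wrap at index 0, and stop once the scan comes back to where it started. The standalone sketch below illustrates only that iteration pattern; it collapses the two passes above into one and substitutes an invented owned[] array for l->ops->is_owner(), so none of the names are LITMUS^RT identifiers.

#include <stdbool.h>
#include <stdio.h>

/* Walk backwards from `start`, wrapping at index 0, until we either find a
 * lock the task does not yet own (the next lock to block on and push
 * priority toward) or arrive back at `start` (nothing left to push on). */
static int pick_next_lock(const bool owned[], int size, int start)
{
	for (int i = (start != 0) ? start - 1 : size - 1;
	     i != start;
	     i = (i != 0) ? i - 1 : size - 1) {
		if (!owned[i])
			return i;
	}
	return -1;
}

int main(void)
{
	bool owned[5] = { true, false, true, false, true };
	/* Scanning backwards from index 1 visits 0, wraps to 4, then stops at 3. */
	printf("next lock to block on: %d\n", pick_next_lock(owned, 5, 1));
	return 0;
}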
@@ -277,14 +308,13 @@ int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
 	return 1;
 }
 
-void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
-				dgl_wait_state_t** dgl_wait,
-				struct task_struct **task)
+struct task_struct* __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
+				dgl_wait_state_t** dgl_wait)
 {
 	wait_queue_t *q;
+	struct task_struct *task = NULL;
 
 	*dgl_wait = NULL;
-	*task = NULL;
 
 	if (waitqueue_active(wq)) {
 		q = list_entry(wq->task_list.next,
@@ -292,13 +322,15 @@ void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
 
 		if(q->func == dgl_wake_up) {
 			*dgl_wait = (dgl_wait_state_t*) q->private;
+			task = (*dgl_wait)->task;
 		}
 		else {
-			*task = (struct task_struct*) q->private;
+			task = (struct task_struct*) q->private;
 		}
 
 		__remove_wait_queue(wq, q);
 	}
+	return task;
 }
 
 void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait)
@@ -354,7 +386,7 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
 
 		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
 
-		BUG_ON(dgl_wait->task != *(l->nest.owner_ptr));
+		BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
 	}
 
 	return 0; /* success */
@@ -456,15 +488,108 @@ all_acquired:
 	return 0;
 }
 
+
+
+static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
+{
+	int i;
+	unsigned long irqflags; //, dummyflags;
+	raw_spinlock_t *dgl_lock;
+	struct litmus_lock *l;
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
+	snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size);
+	TRACE_CUR("Atomic locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
+#endif
+
+	dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
+
+	BUG_ON(dgl_wait->task != current);
+
+	raw_spin_lock_irqsave(dgl_lock, irqflags);
+
+
+	dgl_wait->nr_remaining = dgl_wait->size;
+
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+		l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]); // this should be a forced enqueue if atomic DGLs are needed.
+	}
+
+	if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
+		/* Failed to acquire all locks at once.
+		 * Pick a lock to push on and suspend. */
+		TRACE_CUR("Could not atomically acquire all locks.\n");
+
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+		// KLUDGE: don't count this suspension as time in the gpu
+		// critical section
+		if(tsk_rt(dgl_wait->task)->held_gpus) {
+			tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1;
+		}
+#endif
+		// we are not the owner of any lock, so push on the last one in the DGL
+		// by default.
+
+		l = dgl_wait->locks[dgl_wait->size - 1];
+
+		TRACE_CUR("Activating priority inheritance on lock %d\n",
+			l->ident);
+
+		TS_DGL_LOCK_SUSPEND;
+
+		l->ops->enable_priority(l, dgl_wait);
+		dgl_wait->last_primary = dgl_wait->size - 1;
+
+		TRACE_CUR("Suspending for lock %d\n", l->ident);
+
+		raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
+
+		suspend_for_lock(); // suspend!!!
+
+		TS_DGL_LOCK_RESUME;
+
+		TRACE_CUR("Woken up from DGL suspension.\n");
+
+		goto all_acquired; // we should hold all locks when we wake up.
+	}
+	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
+
+all_acquired:
+
+	dgl_wait->nr_remaining = 0;
+
+	// SANITY CHECK FOR TESTING
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+	}
+
+	TRACE_CUR("Acquired entire DGL\n");
+
+	return 0;
+}
+
+
+
+
 static int supports_dgl(struct litmus_lock *l)
 {
 	struct litmus_lock_ops* ops = l->ops;
-
 	return (ops->dgl_lock &&
 		ops->is_owner &&
+		ops->get_owner &&
 		ops->enable_priority);
 }
 
+static int needs_atomic_dgl(struct litmus_lock *l)
+{
+	struct litmus_lock_ops* ops = l->ops;
+	return (ops->dgl_quick_lock != NULL);
+}
+
 asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 {
 	struct task_struct *t = current;
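do_litmus_dgl_atomic_lock() above enqueues on every lock in the group, tries to take them all in one shot under the DGL spinlock, and on failure records the last lock in the request as last_primary, enables priority inheritance on it, and suspends. The following toy model shows only that all-or-nothing decision; the array, helper, and variable names are made up for illustration and there are no real waitqueues or scheduler calls.

#include <stdbool.h>
#include <stdio.h>

#define DGL_SIZE 3

/* Succeed only if every lock in the group is currently free; in that case
 * take them all in one step, otherwise take none of them. */
static bool try_take_all(bool lock_is_free[], int size)
{
	for (int i = 0; i < size; ++i)
		if (!lock_is_free[i])
			return false;
	for (int i = 0; i < size; ++i)
		lock_is_free[i] = false;
	return true;
}

int main(void)
{
	bool lock_is_free[DGL_SIZE] = { true, false, true };
	int last_primary;

	if (try_take_all(lock_is_free, DGL_SIZE)) {
		printf("acquired all %d locks atomically\n", DGL_SIZE);
	} else {
		/* Mirror the fallback: push priority on the last lock in the
		 * request and suspend until the whole group is handed over. */
		last_primary = DGL_SIZE - 1;
		printf("would suspend, pushing on lock index %d\n", last_primary);
	}
	return 0;
}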
@@ -472,6 +597,8 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 	int dgl_ods[MAX_DGL_SIZE];
 	int i;
 
+	int num_need_atomic = 0;
+
 	dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held.
 
 	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
@@ -503,6 +630,10 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 					dgl_wait_state.locks[i]->ident);
 				goto out;
 			}
+
+			if (needs_atomic_dgl(dgl_wait_state.locks[i])) {
+				++num_need_atomic;
+			}
 		}
 		else {
 			TRACE_CUR("Invalid lock identifier\n");
@@ -510,11 +641,19 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 		}
 	}
 
+	if (num_need_atomic && num_need_atomic != dgl_size) {
+		TRACE_CUR("All locks in DGL must support atomic acquire if any one does.\n");
+		goto out;
+	}
+
 	dgl_wait_state.task = t;
 	dgl_wait_state.size = dgl_size;
 
 	TS_DGL_LOCK_START;
-	err = do_litmus_dgl_lock(&dgl_wait_state);
+	if (!num_need_atomic)
+		err = do_litmus_dgl_lock(&dgl_wait_state);
+	else
+		err = do_litmus_dgl_atomic_lock(&dgl_wait_state);
 
 	/* Note: task may have been suspended or preempted in between! Take
 	 * this into account when computing overheads. */
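The syscall now enforces an all-or-nothing rule: if any lock in the requested group provides dgl_quick_lock (and therefore needs the atomic path), every lock in the group must, otherwise the request is rejected before either locking path runs. A minimal sketch of that classification, using a made-up stand-in for struct litmus_lock_ops rather than the real LITMUS^RT types:

#include <stdbool.h>
#include <stdio.h>

struct toy_lock_ops { bool has_dgl_quick_lock; };
struct toy_lock { struct toy_lock_ops ops; };

/* Returns 1 for the atomic path, 0 for the regular path, and -1 for a mixed
 * group, which the syscall rejects. */
static int classify_dgl(struct toy_lock *locks[], int size)
{
	int num_need_atomic = 0;

	for (int i = 0; i < size; ++i)
		if (locks[i]->ops.has_dgl_quick_lock)
			++num_need_atomic;

	if (num_need_atomic && num_need_atomic != size)
		return -1;
	return num_need_atomic ? 1 : 0;
}

int main(void)
{
	struct toy_lock atomic_lock = { { true } }, plain_lock = { { false } };
	struct toy_lock *mixed[] = { &atomic_lock, &plain_lock };
	struct toy_lock *uniform[] = { &atomic_lock, &atomic_lock };

	printf("mixed: %d, uniform: %d\n",
	       classify_dgl(mixed, 2), classify_dgl(uniform, 2));
	return 0;
}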