Diffstat (limited to 'litmus/locking.c')
-rw-r--r--	litmus/locking.c	92
1 file changed, 31 insertions, 61 deletions
diff --git a/litmus/locking.c b/litmus/locking.c
index 4b8382cd77d1..eddc67a4d36a 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -365,18 +365,6 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[],
 #endif
 
 
-static int failed_owner(struct litmus_lock *cur_lock, struct task_struct *t)
-{
-	struct task_struct *cur_owner = cur_lock->ops->get_owner(cur_lock);
-	printk(KERN_EMERG "lock %d expected owner %s/%d but got %s/%d.\n",
-		cur_lock->ident,
-		(t) ? t->comm : "null",
-		(t) ? t->pid : 0,
-		(cur_owner) ? cur_owner->comm : "null",
-		(cur_owner) ? cur_owner->pid : 0);
-	BUG();
-}
-
 /* only valid when locks are prioq locks!!!
  * THE BIG DGL LOCK MUST BE HELD! */
 int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait)
@@ -395,12 +383,8 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
 	/* take the locks */
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-
 		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
-
-		if(!(l->ops->is_owner(l, dgl_wait->task)))
-			failed_owner(l, dgl_wait->task);
-		//BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
+		BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
 	}
 
 	return 0; /* success */
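
[Note on the hunks above: the deleted failed_owner() helper printed the expected vs. observed owner (comm/pid) at KERN_EMERG before crashing; the commit reverts to the plain assertion that had been left commented out. For reference, BUG_ON() reduces to the following (simplified from include/asm-generic/bug.h), so the oops and backtrace are kept while the custom owner diagnostics are dropped:

	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
]
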
@@ -510,6 +494,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 	unsigned long irqflags; //, dummyflags;
 	raw_spinlock_t *dgl_lock;
 	struct litmus_lock *l;
+	struct task_struct *t = current;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
 	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
@@ -519,7 +504,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 
 	dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
 
-	BUG_ON(dgl_wait->task != current);
+	BUG_ON(dgl_wait->task != t);
 
 	raw_spin_lock_irqsave(dgl_lock, irqflags);
 
@@ -528,7 +513,8 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-		l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]); // this should be a forced enqueue if atomic DGLs are needed.
+		// this should be a forced enqueue if atomic DGLs are needed.
+		l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]);
 	}
 
 	if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
@@ -536,27 +522,26 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 		 * Pick a lock to push on and suspend. */
 		TRACE_CUR("Could not atomically acquire all locks.\n");
 
+		/* we set the uninterruptible state here since
+		 * __attempt_atomic_dgl_acquire() may actually succeed. */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 		// KLUDGE: don't count this suspension as time in the critical gpu
 		// critical section
-		if(tsk_rt(dgl_wait->task)->held_gpus) {
-			tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1;
+		if(tsk_rt(t)->held_gpus) {
+			tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
 		}
 #endif
-		// we are not the owner of any lock, so push on the last one in the DGL
-		// by default.
 
-		l = dgl_wait->locks[dgl_wait->size - 1];
+		// select a lock to push priority on
+		dgl_wait->last_primary = 0; // default
+		select_next_lock(dgl_wait); // may change value of last_primary
 
-		TRACE_CUR("Activating priority inheritance on lock %d\n",
-			l->ident);
+		l = dgl_wait->locks[dgl_wait->last_primary];
 
 		TS_DGL_LOCK_SUSPEND;
 
-		l->ops->enable_priority(l, dgl_wait);
-		dgl_wait->last_primary = dgl_wait->size - 1;
-
 		TRACE_CUR("Suspending for lock %d\n", l->ident);
 
 		raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
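
[Note on the hunk above: setting TASK_UNINTERRUPTIBLE while still holding dgl_lock follows the standard kernel suspension pattern: a wake_up issued by a lock releaser after dgl_lock is dropped cannot be lost; at worst it races with schedule() and simply returns the task to TASK_RUNNING. A minimal sketch of the pattern, with illustrative names (enqueue_waiter()/dequeue_waiter() are not LITMUS^RT API):

	/* waiter, holding 'lock' */
	set_task_state(current, TASK_UNINTERRUPTIBLE);	/* 1: declare intent to sleep */
	enqueue_waiter(current);			/* 2: become visible to wakers */
	raw_spin_unlock_irqrestore(lock, flags);	/* 3: drop the lock... */
	schedule();					/* 4: ...then really sleep */

	/* waker, holding 'lock' */
	struct task_struct *w = dequeue_waiter();
	raw_spin_unlock_irqrestore(lock, flags);
	wake_up_process(w);	/* safe even if w has not reached schedule() yet */

Because the state change happens before step 3, a wake-up arriving between steps 3 and 4 is not lost.]
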
@@ -578,9 +563,7 @@ all_acquired:
 	// SANITY CHECK FOR TESTING
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-		if(!(l->ops->is_owner(l, dgl_wait->task)))
-			failed_owner(l, dgl_wait->task);
-		//BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
 	}
 
 	TRACE_CUR("Acquired entire DGL\n");
@@ -589,23 +572,6 @@
 }
 
 
-
-
-static int supports_dgl(struct litmus_lock *l)
-{
-	struct litmus_lock_ops* ops = l->ops;
-	return (ops->dgl_lock &&
-		ops->is_owner &&
-		ops->get_owner &&
-		ops->enable_priority);
-}
-
-static int needs_atomic_dgl(struct litmus_lock *l)
-{
-	struct litmus_lock_ops* ops = l->ops;
-	return (ops->dgl_quick_lock != NULL);
-}
-
 asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 {
 	struct task_struct *t = current;
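
[Note on the hunk above: the removed supports_dgl()/needs_atomic_dgl() helpers tested individual function pointers; the callers below now read supports_dgl/requires_atomic_dgl directly off the ops table. The ops-struct side of that change lives outside litmus/locking.c; presumably (this sketch is an assumption, not the actual header) struct litmus_lock_ops grows two flags that each lock type sets where its ops table is defined:

	struct litmus_lock_ops {
		/* ... existing lock/unlock/DGL function pointers ... */
		unsigned int supports_dgl:1;		/* implements dgl_lock/is_owner/etc. */
		unsigned int requires_atomic_dgl:1;	/* group must be acquired atomically */
	};

	/* e.g., in a lock type that supports atomic DGLs (name illustrative): */
	static struct litmus_lock_ops prioq_mutex_ops = {
		/* ... */
		.supports_dgl = 1,
		.requires_atomic_dgl = 1,
	};

This moves the capability decision to ops-table definition time instead of re-deriving it from NULL checks on every syscall.]
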
@@ -641,13 +607,13 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
 			dgl_wait_state.locks[i] = get_lock(entry);
-			if(!supports_dgl(dgl_wait_state.locks[i])) {
+			if(!dgl_wait_state.locks[i]->ops->supports_dgl) {
 				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
 					dgl_wait_state.locks[i]->ident);
 				goto out;
 			}
 
-			if (needs_atomic_dgl(dgl_wait_state.locks[i])) {
+			if(dgl_wait_state.locks[i]->ops->requires_atomic_dgl) {
 				++num_need_atomic;
 			}
 		}
@@ -686,9 +652,13 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 	long err = 0;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
-	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
-	snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size);
-	TRACE_CUR("Unlocking a DGL with size %d: %s\n", dgl_size, dglstr);
+	{
+		char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
+		snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size);
+		TRACE_CUR("Unlocking a DGL with size %d: %s\n",
+			dgl_size,
+			dglstr);
+	}
 #endif
 
 	for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order
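
[Note on the hunk above: wrapping dglstr in its own block is presumably compile hygiene: under the kernel's gnu89 dialect a declaration may not follow a statement, so an inner block keeps the declaration legal however the surrounding code grows, and it also confines the CONFIG_LITMUS_MAX_DGL_SIZE*5-byte buffer to the one TRACE_CUR call that needs it. The general form, with illustrative names:

	void g(void)
	{
		do_work();			/* a statement comes first */
		{
			char buf[64];		/* gnu89-legal: new block, new declarations */
			fill(buf, sizeof(buf));
		}				/* buf's lifetime ends here */
	}
]
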
@@ -740,7 +710,7 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 		entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
 			dgl_locks[i] = get_lock(entry);
-			if(!supports_dgl(dgl_locks[i])) {
+			if(!dgl_locks[i]->ops->supports_dgl) {
 				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
 					dgl_locks[i]->ident);
 				goto out;
@@ -852,19 +822,19 @@ void suspend_for_lock(void)
 		tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
 	}
 #endif
 
 	schedule();
 
 
 	/* TODO: Move the following to wake_up_for_lock()? */
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 	// re-enable tracking
 	if(tsk_rt(t)->held_gpus) {
 		tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
 	}
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
 	if (gpu_restore) {
 		/* restore our state */
@@ -883,9 +853,9 @@ int wake_up_for_lock(struct task_struct* t)
 int wake_up_for_lock(struct task_struct* t)
 {
 	int ret;
 
 	ret = wake_up_process(t);
 
 	return ret;
 }
 
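
[End-to-end, the syscall pair above is driven from userspace roughly as follows. This is a hedged sketch: litmus_dgl_lock()/litmus_dgl_unlock() are assumed to be the liblitmus wrappers from the matching DGL development branch, and open_fifo_sem() stands in for whatever od-open call creates the lock descriptors; verify names and signatures against the liblitmus in use.

	int dgl[2];
	dgl[0] = open_fifo_sem(shared_fd, 0);	/* object descriptors, as passed */
	dgl[1] = open_fifo_sem(shared_fd, 1);	/* to sys_litmus_dgl_lock() above */

	if (litmus_dgl_lock(dgl, 2) == 0) {	/* returns once ALL locks are held */
		/* critical section over both resources */
		litmus_dgl_unlock(dgl, 2);	/* kernel releases in reverse order */
	}
]
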