author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-30 16:43:52 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-30 16:43:52 -0400
commit     62f2907f445b08f958acf1cc1a0c29736d4ba206 (patch)
tree       a11743eddcc125c9c3ac0c527078338e3d01b295 /litmus/sched_gsn_edf.c
parent     d0961e328a2a4c026c884c768b798cb882922708 (diff)
Nested inheritance with fine-grained locking.

A minor hack to lockdep was required to allow the inheritance
propagation locking logic to work.
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--   litmus/sched_gsn_edf.c | 741
1 files changed, 565 insertions, 176 deletions
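The patch replaces the single coarse inheritance step with relay-style ("hand-over-hand") propagation across per-lock spinlocks: to push a priority change down a chain of nested locks, each step acquires the next lock's spinlock before releasing the one it arrived through, so the blocking chain cannot shift underneath the walker. Below is a minimal, self-contained userspace sketch of that idea only; it is not LITMUS^RT code, and every name in it (toy_task, toy_lock, propagate_increase) is invented for illustration.

```c
/* Illustrative sketch of relay-style priority-inheritance propagation.
 * Not LITMUS^RT code: all types and names here are made up. */
#include <pthread.h>
#include <stdio.h>

struct toy_lock;

struct toy_task {
	int eff_prio;                 /* lower value = higher priority */
	struct toy_lock *blocked_on;  /* lock this task currently waits for */
};

struct toy_lock {
	pthread_spinlock_t lock;      /* fine-grained, per-lock spinlock */
	struct toy_task *owner;       /* current holder */
};

/* Push 'new_prio' to the owner of 'l', then follow the owner's own
 * blocked_on pointer. Relay style: lock the next link before unlocking
 * 'prev' (the lock we arrived through, or NULL on the first step). */
static void propagate_increase(struct toy_lock *l, int new_prio,
			       pthread_spinlock_t *prev)
{
	pthread_spin_lock(&l->lock);
	if (prev)
		pthread_spin_unlock(prev);      /* release the previous link */

	if (l->owner && new_prio < l->owner->eff_prio) {
		l->owner->eff_prio = new_prio;  /* owner inherits the priority */
		if (l->owner->blocked_on) {     /* owner is itself blocked: recurse */
			propagate_increase(l->owner->blocked_on, new_prio, &l->lock);
			return;                 /* callee released l->lock for us */
		}
	}
	pthread_spin_unlock(&l->lock);
}

int main(void)
{
	struct toy_lock a, b;
	struct toy_task t1 = { .eff_prio = 10 }, t2 = { .eff_prio = 20 };

	pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);
	a.owner = &t1;  t1.blocked_on = &b;   /* t1 holds A, waits for B */
	b.owner = &t2;  t2.blocked_on = NULL; /* t2 holds B */

	propagate_increase(&a, 5, NULL);      /* a prio-5 waiter arrives at A */
	printf("t1=%d t2=%d\n", t1.eff_prio, t2.eff_prio); /* prints: t1=5 t2=5 */
	return 0;
}
```

In the diff below, the same pattern appears as raw_spin_lock(&mutex->lock) immediately followed by raw_spin_unlock(to_unlock) in the propagate_*_inheritance callbacks.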
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6edcc339ea5e..99c93ae65e06 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -638,11 +638,17 @@ static void gsnedf_task_exit(struct task_struct * t)
638 | 638 | ||
639 | static long gsnedf_admit_task(struct task_struct* tsk) | 639 | static long gsnedf_admit_task(struct task_struct* tsk) |
640 | { | 640 | { |
641 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
642 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, edf_max_heap_base_priority_order); | ||
643 | #endif | ||
644 | |||
641 | return 0; | 645 | return 0; |
642 | } | 646 | } |
643 | 647 | ||
644 | #ifdef CONFIG_LITMUS_LOCKING | 648 | #ifdef CONFIG_LITMUS_LOCKING |
645 | 649 | ||
650 | extern raw_spinlock_t rsm_global_lock; | ||
651 | |||
646 | #include <litmus/fdso.h> | 652 | #include <litmus/fdso.h> |
647 | 653 | ||
648 | /* called with IRQs off */ | 654 | /* called with IRQs off */ |
@@ -653,60 +659,71 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
653 | 659 | ||
654 | raw_spin_lock(&gsnedf_lock); | 660 | raw_spin_lock(&gsnedf_lock); |
655 | 661 | ||
656 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 662 | /* this sanity check allows for weaker locking in protocols */ |
657 | tsk_rt(t)->eff_prio = prio_inh; | 663 | if(__edf_higher_prio(prio_inh, BASE, t, INHERITED)) { |
658 | 664 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | |
659 | linked_on = tsk_rt(t)->linked_on; | 665 | tsk_rt(t)->inh_task = prio_inh; |
660 | 666 | ||
661 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 667 | linked_on = tsk_rt(t)->linked_on; |
662 | if (linked_on != NO_CPU) { | 668 | |
663 | TRACE_TASK(t, "%s: linked on %d\n", | 669 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
664 | __FUNCTION__, linked_on); | 670 | if (linked_on != NO_CPU) { |
665 | /* Holder is scheduled; need to re-order CPUs. | 671 | TRACE_TASK(t, "%s: linked on %d\n", |
666 | * We can't use heap_decrease() here since | 672 | __FUNCTION__, linked_on); |
667 | * the cpu_heap is ordered in reverse direction, so | 673 | /* Holder is scheduled; need to re-order CPUs. |
668 | * it is actually an increase. */ | 674 | * We can't use heap_decrease() here since |
669 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); | 675 | * the cpu_heap is ordered in reverse direction, so |
670 | binheap_add(&gsnedf_cpus[linked_on]->hn, | 676 | * it is actually an increase. */ |
671 | &gsnedf_cpu_heap, cpu_entry_t, hn); | 677 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); |
672 | } else { | 678 | binheap_add(&gsnedf_cpus[linked_on]->hn, |
673 | /* holder may be queued: first stop queue changes */ | 679 | &gsnedf_cpu_heap, cpu_entry_t, hn); |
674 | raw_spin_lock(&gsnedf.release_lock); | ||
675 | if (is_queued(t)) { | ||
676 | TRACE_TASK(t, "%s: is queued\n", | ||
677 | __FUNCTION__); | ||
678 | /* We need to update the position of holder in some | ||
679 | * heap. Note that this could be a release heap if we | ||
680 | * budget enforcement is used and this job overran. */ | ||
681 | check_preempt = | ||
682 | !bheap_decrease(edf_ready_order, | ||
683 | tsk_rt(t)->heap_node); | ||
684 | } else { | 680 | } else { |
685 | /* Nothing to do: if it is not queued and not linked | 681 | /* holder may be queued: first stop queue changes */ |
686 | * then it is either sleeping or currently being moved | 682 | raw_spin_lock(&gsnedf.release_lock); |
687 | * by other code (e.g., a timer interrupt handler) that | 683 | if (is_queued(t)) { |
688 | * will use the correct priority when enqueuing the | 684 | TRACE_TASK(t, "%s: is queued\n", |
689 | * task. */ | 685 | __FUNCTION__); |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 686 | /* We need to update the position of holder in some |
691 | __FUNCTION__); | 687 | * heap. Note that this could be a release heap if we |
692 | } | 688 | * budget enforcement is used and this job overran. */ |
693 | raw_spin_unlock(&gsnedf.release_lock); | 689 | check_preempt = |
694 | 690 | !bheap_decrease(edf_ready_order, | |
695 | /* If holder was enqueued in a release heap, then the following | 691 | tsk_rt(t)->heap_node); |
696 | * preemption check is pointless, but we can't easily detect | 692 | } else { |
697 | * that case. If you want to fix this, then consider that | 693 | /* Nothing to do: if it is not queued and not linked |
698 | * simply adding a state flag requires O(n) time to update when | 694 | * then it is either sleeping or currently being moved |
699 | * releasing n tasks, which conflicts with the goal to have | 695 | * by other code (e.g., a timer interrupt handler) that |
700 | * O(log n) merges. */ | 696 | * will use the correct priority when enqueuing the |
701 | if (check_preempt) { | 697 | * task. */ |
702 | /* heap_decrease() hit the top level of the heap: make | 698 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", |
703 | * sure preemption checks get the right task, not the | 699 | __FUNCTION__); |
704 | * potentially stale cache. */ | 700 | } |
705 | bheap_uncache_min(edf_ready_order, | 701 | raw_spin_unlock(&gsnedf.release_lock); |
706 | &gsnedf.ready_queue); | 702 | |
707 | check_for_preemptions(); | 703 | /* If holder was enqueued in a release heap, then the following |
704 | * preemption check is pointless, but we can't easily detect | ||
705 | * that case. If you want to fix this, then consider that | ||
706 | * simply adding a state flag requires O(n) time to update when | ||
707 | * releasing n tasks, which conflicts with the goal to have | ||
708 | * O(log n) merges. */ | ||
709 | if (check_preempt) { | ||
710 | /* heap_decrease() hit the top level of the heap: make | ||
711 | * sure preemption checks get the right task, not the | ||
712 | * potentially stale cache. */ | ||
713 | bheap_uncache_min(edf_ready_order, | ||
714 | &gsnedf.ready_queue); | ||
715 | check_for_preemptions(); | ||
716 | } | ||
708 | } | 717 | } |
709 | } | 718 | } |
719 | else { | ||
720 | TRACE_TASK(t, "Spurious invalid priority increase. " | ||
721 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
722 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
723 | t->comm, t->pid, | ||
724 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
725 | prio_inh->comm, prio_inh->pid); | ||
726 | } | ||
710 | 727 | ||
711 | raw_spin_unlock(&gsnedf_lock); | 728 | raw_spin_unlock(&gsnedf_lock); |
712 | } | 729 | } |
@@ -715,23 +732,52 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
715 | static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 732 | static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
716 | { | 733 | { |
717 | raw_spin_lock(&gsnedf_lock); | 734 | raw_spin_lock(&gsnedf_lock); |
718 | 735 | ||
719 | /* A job only stops inheriting a priority when it releases a | 736 | if(__edf_higher_prio(t, INHERITED, prio_inh, BASE)) { |
720 | * resource. Thus we can make the following assumption.*/ | 737 | /* A job only stops inheriting a priority when it releases a |
721 | //BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 738 | * resource. Thus we can make the following assumption.*/ |
722 | 739 | if(prio_inh) | |
723 | if(prio_inh) | 740 | TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); |
724 | TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); | 741 | else |
725 | else | 742 | TRACE_TASK(t, "base priority restored.\n"); |
726 | TRACE_TASK(t, "base priority restored.\n"); | ||
727 | 743 | ||
728 | tsk_rt(t)->eff_prio = prio_inh; | 744 | tsk_rt(t)->inh_task = prio_inh; |
729 | 745 | ||
730 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 746 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
731 | * since the priority was effectively lowered. */ | 747 | TRACE_TASK(t, "is scheduled.\n"); |
732 | unlink(t); | 748 | |
733 | gsnedf_job_arrival(t); | 749 | /* Check if rescheduling is necessary. We can't use heap_decrease() |
750 | * since the priority was effectively lowered. */ | ||
751 | unlink(t); | ||
752 | gsnedf_job_arrival(t); | ||
753 | } | ||
754 | else { | ||
755 | /* task is queued */ | ||
756 | raw_spin_lock(&gsnedf.release_lock); | ||
757 | if (is_queued(t)) { | ||
758 | TRACE_TASK(t, "is queued.\n"); | ||
759 | |||
760 | /* decrease in priority, so we have to re-add to binomial heap */ | ||
761 | unlink(t); | ||
762 | gsnedf_job_arrival(t); | ||
763 | } | ||
764 | else { | ||
765 | TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n"); | ||
766 | } | ||
767 | raw_spin_unlock(&gsnedf.release_lock); | ||
768 | } | ||
769 | } | ||
770 | else { | ||
771 | TRACE_TASK(t, "Spurious invalid priority decrease. " | ||
772 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
773 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
774 | t->comm, t->pid, | ||
775 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
776 | (prio_inh) ? prio_inh->comm : "nil", | ||
777 | (prio_inh) ? prio_inh->pid : -1); | ||
778 | } | ||
734 | 779 | ||
780 | |||
735 | raw_spin_unlock(&gsnedf_lock); | 781 | raw_spin_unlock(&gsnedf_lock); |
736 | } | 782 | } |
737 | 783 | ||
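The two hunks above wrap both inheritance paths in a sanity check (__edf_higher_prio against the task's current INHERITED priority): with the weaker per-lock locking, an inheritance request can arrive stale, so it is applied only if it genuinely raises (or lowers) the effective priority and is otherwise just traced as spurious. A tiny sketch of that guard, with made-up names and plain integers standing in for EDF deadlines (smaller = higher priority):

```c
/* Illustrative guard only -- not the kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_task { int base_prio; int eff_prio; };

static bool try_increase(struct toy_task *t, int inherited_prio)
{
	if (inherited_prio < t->eff_prio) {  /* genuinely higher priority */
		t->eff_prio = inherited_prio;
		return true;
	}
	/* Stale/concurrent request: safe to drop, as in the
	 * "Spurious invalid priority increase" branch above. */
	return false;
}

/* 'new_eff' is the priority t should fall back to: the next inheritor's
 * priority, or t's base priority when inheritance ends entirely. */
static bool try_decrease(struct toy_task *t, int new_eff)
{
	if (new_eff > t->eff_prio) {         /* effective priority really drops */
		t->eff_prio = new_eff;
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_task t = { .base_prio = 20, .eff_prio = 20 };
	printf("%d\n", try_increase(&t, 10));      /* 1: applied, eff = 10 */
	printf("%d\n", try_increase(&t, 15));      /* 0: spurious, stays 10 */
	printf("%d\n", try_decrease(&t, 15));      /* 1: applied, eff = 15 */
	printf("%d\n", try_decrease(&t, t.base_prio)); /* 1: back to base 20 */
	return 0;
}
```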
@@ -741,110 +787,113 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
741 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 787 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
742 | 788 | ||
743 | 789 | ||
744 | 790 | void print_hp_waiters(struct binheap_node* n, int depth) | |
745 | /* called with IRQs off */ | ||
746 | static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
747 | { | 791 | { |
748 | increase_priority_inheritance(t, prio_inh); // increase our prio. | 792 | struct litmus_lock* l; |
793 | char padding[81] = " "; | ||
794 | struct task_struct *hp = NULL; | ||
795 | struct task_struct *hp_eff = NULL; | ||
796 | struct task_struct *node_prio = NULL; | ||
797 | |||
749 | 798 | ||
750 | // beware: recursion | 799 | if(n == NULL) { |
751 | if(tsk_rt(t)->blocked_lock && | 800 | TRACE("+-> %p\n", NULL); |
752 | tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance) { | 801 | return; |
753 | TRACE_TASK(mutex->hp_waiter, "Inheritor is blocked. Checking lock %p.", l); | ||
754 | tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance(tsk_rt(t)->blocked_lock, t); | ||
755 | } | 802 | } |
803 | |||
804 | l = binheap_entry(n, struct litmus_lock, hp_binheap_node); | ||
805 | |||
806 | if(depth*2 <= 80) | ||
807 | padding[depth*2] = '\0'; | ||
808 | |||
809 | if(*(l->hp_waiter_ptr)) { | ||
810 | hp = *(l->hp_waiter_ptr); | ||
811 | |||
812 | if(tsk_rt(hp)->inh_task) { | ||
813 | hp_eff = tsk_rt(hp)->inh_task; | ||
814 | } | ||
815 | } | ||
816 | |||
817 | node_prio = l->hp_waiter_eff_prio; | ||
818 | |||
819 | TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", | ||
820 | padding, | ||
821 | (node_prio) ? node_prio->comm : "nil", | ||
822 | (node_prio) ? node_prio->pid : -1, | ||
823 | (hp) ? hp->comm : "nil", | ||
824 | (hp) ? hp->pid : -1, | ||
825 | (hp_eff) ? hp_eff->comm : "nil", | ||
826 | (hp_eff) ? hp_eff->pid : -1, | ||
827 | l->ident); | ||
828 | |||
829 | if(n->left) print_hp_waiters(n->left, depth+1); | ||
830 | if(n->right) print_hp_waiters(n->right, depth+1); | ||
756 | } | 831 | } |
757 | 832 | ||
833 | |||
758 | /* called with IRQs off */ | 834 | /* called with IRQs off */ |
759 | static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 835 | /* preconditions: |
836 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
837 | (2) The lock 'to_unlock' is held. | ||
838 | */ | ||
839 | static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | ||
760 | { | 840 | { |
761 | decrease_priority_inheritance(t, prio_inh); | 841 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
762 | |||
763 | // beware: recursion | ||
764 | if(tsk_rt(t)->blocked_lock && tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance) { | ||
765 | TRACE_TASK(mutex->hp_waiter, "Inheritor is blocked. Checking lock %p.", l); | ||
766 | tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance(tsk_rt(t)->blocked_lock, t); | ||
767 | } | ||
768 | } | ||
769 | 842 | ||
843 | increase_priority_inheritance(t, prio_inh); // increase our prio. | ||
770 | 844 | ||
771 | void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | 845 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
772 | struct task_struct* t) | ||
773 | { | ||
774 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
775 | unsigned long flags; | ||
776 | 846 | ||
777 | spin_lock_irqsave(&mutex->wait.lock, flags); | ||
778 | 847 | ||
779 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 848 | if(blocked_lock) { |
780 | if(t != mutex->hp_waiter) { | 849 | if(blocked_lock->ops->propagate_increase_inheritance) { |
781 | if(edf_higher_prio(t, mutex->hp_waiter)) { | 850 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); |
782 | mutex->hp_waiter = t; | ||
783 | |||
784 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter by propagation.\n"); | ||
785 | } | ||
786 | else { | ||
787 | goto EXIT; // HP waiter has greater prio than us. bail out. | ||
788 | } | ||
789 | } | ||
790 | 851 | ||
791 | if(edf_higher_prio(mutex->hp_waiter, mutex->owner)) { | 852 | // beware: recursion |
792 | struct task_struct* prio = (tsk_rt(t)->eff_prio) ? tsk_rt(t)->eff_prio : t; | 853 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, t, to_unlock, irqflags); |
793 | 854 | } | |
794 | TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l); | 855 | else { |
795 | 856 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); | |
796 | nested_increase_priority_inheritance(mutex->owner, prio); | 857 | raw_spin_unlock_irqrestore(to_unlock, irqflags); |
797 | } | 858 | } |
798 | } | 859 | } |
799 | 860 | else { | |
800 | EXIT: | 861 | TRACE_TASK(t, "is not blocked. No propagation.\n"); |
801 | 862 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | |
802 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 863 | } |
803 | } | 864 | } |
804 | 865 | ||
805 | void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | 866 | /* called with IRQs off */ |
806 | struct task_struct* t) | 867 | /* preconditions: |
868 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
869 | (2) The lock 'to_unlock' is held. | ||
870 | */ | ||
871 | static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | ||
807 | { | 872 | { |
808 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 873 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
809 | unsigned long flags; | 874 | decrease_priority_inheritance(t, prio_inh); |
810 | 875 | ||
811 | spin_lock_irqsave(&mutex->wait.lock, flags); | 876 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
812 | 877 | ||
813 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 878 | if(blocked_lock) { |
814 | if(t == mutex->hp_waiter) { | 879 | if(blocked_lock->ops->propagate_decrease_inheritance) { |
815 | struct task_struct* prio; | 880 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); |
816 | |||
817 | TRACE_TASK(t, "was highest-prio waiter\n"); | ||
818 | /* next has the highest priority --- it doesn't need to | ||
819 | * inherit. However, we need to make sure that the | ||
820 | * next-highest priority in the queue is reflected in | ||
821 | * hp_waiter. */ | ||
822 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
823 | |||
824 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | ||
825 | |||
826 | TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l); | ||
827 | 881 | ||
828 | 882 | // beware: recursion | |
829 | // lower eff_prio of owner to new hp if needed. | 883 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, to_unlock, irqflags); |
830 | if(t == mutex->owner->eff_prio) | 884 | } |
831 | { | 885 | else { |
832 | 886 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); | |
833 | } | 887 | raw_spin_unlock_irqrestore(to_unlock, irqflags); |
834 | |||
835 | |||
836 | nested_increase_priority_inheritance(mutex->owner, prio); | ||
837 | } | 888 | } |
838 | } | 889 | } |
839 | 890 | else { | |
840 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 891 | TRACE_TASK(t, "is not blocked. No propagation.\n"); |
892 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | ||
893 | } | ||
841 | } | 894 | } |
842 | 895 | ||
843 | 896 | ||
844 | |||
845 | |||
846 | |||
847 | |||
848 | /* ******************** RSM MUTEX ********************** */ | 897 | /* ******************** RSM MUTEX ********************** */ |
849 | 898 | ||
850 | /* struct for semaphore with priority inheritance */ | 899 | /* struct for semaphore with priority inheritance */ |
@@ -858,7 +907,11 @@ struct rsm_mutex {
858 | struct task_struct *hp_waiter; | 907 | struct task_struct *hp_waiter; |
859 | 908 | ||
860 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ | 909 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ |
861 | wait_queue_head_t wait; | 910 | wait_queue_head_t wait; |
911 | |||
912 | /* we do some nesting within spinlocks, so we can't use the normal | ||
913 | sleeplocks found in wait_queue_head_t. */ | ||
914 | raw_spinlock_t lock; | ||
862 | }; | 915 | }; |
863 | 916 | ||
864 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | 917 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) |
@@ -884,30 +937,38 @@ struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
884 | return found; | 937 | return found; |
885 | } | 938 | } |
886 | 939 | ||
887 | 940 | static inline struct task_struct* top_priority(struct binheap_handle* handle) { | |
888 | 941 | if(!binheap_empty(handle)) { | |
942 | return (struct task_struct*)(binheap_top_entry(handle, struct litmus_lock, hp_binheap_node)->hp_waiter_eff_prio); | ||
943 | } | ||
944 | return NULL; | ||
945 | } | ||
889 | 946 | ||
890 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | 947 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) |
891 | { | 948 | { |
892 | struct task_struct* t = current; | 949 | struct task_struct *t = current; |
950 | struct task_struct *owner; | ||
893 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 951 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
894 | wait_queue_t wait; | 952 | wait_queue_t wait; |
895 | unsigned long flags; | 953 | unsigned long flags; |
896 | 954 | ||
897 | if (!is_realtime(t)) | 955 | if (!is_realtime(t)) |
898 | return -EPERM; | 956 | return -EPERM; |
899 | 957 | ||
900 | spin_lock_irqsave(&mutex->wait.lock, flags); | 958 | raw_spin_lock_irqsave(&mutex->lock, flags); |
901 | 959 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | |
960 | |||
902 | if (mutex->owner) { | 961 | if (mutex->owner) { |
962 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); | ||
963 | |||
903 | /* resource is not free => must suspend and wait */ | 964 | /* resource is not free => must suspend and wait */ |
965 | |||
966 | owner = mutex->owner; | ||
904 | 967 | ||
905 | init_waitqueue_entry(&wait, t); | 968 | init_waitqueue_entry(&wait, t); |
906 | 969 | ||
907 | // TODO: inheritance propagation from another thread may not finish | ||
908 | // before I check local inheritance... | ||
909 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ | 970 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ |
910 | mb(); | 971 | mb(); // needed? |
911 | 972 | ||
912 | /* FIXME: interruptible would be nice some day */ | 973 | /* FIXME: interruptible would be nice some day */ |
913 | set_task_state(t, TASK_UNINTERRUPTIBLE); | 974 | set_task_state(t, TASK_UNINTERRUPTIBLE); |
@@ -916,17 +977,67 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
916 | 977 | ||
917 | /* check if we need to activate priority inheritance */ | 978 | /* check if we need to activate priority inheritance */ |
918 | if (edf_higher_prio(t, mutex->hp_waiter)) { | 979 | if (edf_higher_prio(t, mutex->hp_waiter)) { |
980 | |||
981 | struct task_struct *old_max_eff_prio; | ||
982 | struct task_struct *new_max_eff_prio; | ||
983 | struct task_struct *new_prio = NULL; | ||
984 | |||
985 | if(mutex->hp_waiter) | ||
986 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
987 | else | ||
988 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
989 | |||
990 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
991 | |||
992 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
993 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
994 | |||
995 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
996 | |||
919 | mutex->hp_waiter = t; | 997 | mutex->hp_waiter = t; |
920 | if (edf_higher_prio(t, mutex->owner)) { | 998 | l->hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); |
921 | struct task_struct* prio = (tsk_rt(t)->eff_prio) ? tsk_rt(t)->eff_prio : t; | 999 | |
922 | nested_increase_priority_inheritance(mutex->owner, prio); | 1000 | binheap_decrease(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); |
1001 | |||
1002 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1003 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1004 | |||
1005 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1006 | |||
1007 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1008 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1009 | |||
1010 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1011 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))){ | ||
1012 | new_prio = new_max_eff_prio; | ||
1013 | } | ||
1014 | } | ||
1015 | else { | ||
1016 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1017 | } | ||
1018 | |||
1019 | //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1020 | |||
1021 | if(new_prio) { | ||
1022 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
923 | } | 1023 | } |
1024 | else { | ||
1025 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1026 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
1027 | } | ||
1028 | |||
1029 | } | ||
1030 | else { | ||
1031 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1032 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
924 | } | 1033 | } |
925 | 1034 | ||
1035 | |||
926 | TS_LOCK_SUSPEND; | 1036 | TS_LOCK_SUSPEND; |
927 | 1037 | ||
928 | /* release lock before sleeping */ | 1038 | /* release lock before sleeping */ |
929 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1039 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); |
1040 | //raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
930 | 1041 | ||
931 | /* We depend on the FIFO order. Thus, we don't need to recheck | 1042 | /* We depend on the FIFO order. Thus, we don't need to recheck |
932 | * when we wake up; we are guaranteed to have the lock since | 1043 | * when we wake up; we are guaranteed to have the lock since |
@@ -934,44 +1045,99 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
934 | */ | 1045 | */ |
935 | 1046 | ||
936 | schedule(); | 1047 | schedule(); |
937 | 1048 | ||
938 | TS_LOCK_RESUME; | 1049 | TS_LOCK_RESUME; |
939 | 1050 | ||
940 | /* Since we hold the lock, no other task will change | 1051 | /* Since we hold the lock, no other task will change |
941 | * ->owner. We can thus check it without acquiring the spin | 1052 | * ->owner. We can thus check it without acquiring the spin |
942 | * lock. */ | 1053 | * lock. */ |
943 | BUG_ON(mutex->owner != t); | 1054 | BUG_ON(mutex->owner != t); |
1055 | |||
1056 | TRACE_TASK(t, "Acquired lock %d.\n", l->ident); | ||
1057 | |||
944 | } else { | 1058 | } else { |
1059 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1060 | |||
945 | /* it's ours now */ | 1061 | /* it's ours now */ |
946 | mutex->owner = t; | 1062 | mutex->owner = t; |
947 | |||
948 | nest_lock(l, t); | ||
949 | 1063 | ||
950 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1064 | raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); |
1065 | binheap_add(&l->hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node); | ||
1066 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
1067 | |||
1068 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
1069 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
951 | } | 1070 | } |
952 | 1071 | ||
953 | return 0; | 1072 | return 0; |
954 | } | 1073 | } |
955 | 1074 | ||
1075 | |||
956 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | 1076 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) |
957 | { | 1077 | { |
958 | struct task_struct *t = current, *next; | 1078 | struct task_struct *t = current, *next; |
959 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 1079 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
960 | unsigned long flags; | 1080 | unsigned long flags; |
1081 | |||
1082 | struct task_struct *old_max_eff_prio; | ||
1083 | |||
1084 | |||
961 | int err = 0; | 1085 | int err = 0; |
962 | 1086 | ||
963 | spin_lock_irqsave(&mutex->wait.lock, flags); | 1087 | raw_spin_lock_irqsave(&mutex->lock, flags); |
1088 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | ||
1089 | |||
964 | 1090 | ||
965 | if (mutex->owner != t) { | 1091 | if (mutex->owner != t) { |
966 | err = -EINVAL; | 1092 | err = -EINVAL; |
967 | goto out; | 1093 | goto out; |
968 | } | 1094 | } |
969 | 1095 | ||
970 | /* we lose the benefit of priority inheritance (if any) */ | 1096 | |
971 | if (tsk_rt(t)->local_prio) { | 1097 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); |
972 | nested_decrease_priority_inheritance(t, NULL); | 1098 | |
1099 | TRACE_TASK(t, "Freeing lock %d\n", l->ident); | ||
1100 | //TRACE_TASK(t, "Heap Before:\n"); | ||
1101 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1102 | |||
1103 | //old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node)->hp_waiter_ptr); | ||
1104 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1105 | binheap_delete(&l->hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); | ||
1106 | |||
1107 | //raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1108 | |||
1109 | //TRACE_TASK(t, "Heap After:\n"); | ||
1110 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1111 | |||
1112 | if(tsk_rt(t)->inh_task){ | ||
1113 | struct task_struct *new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1114 | |||
1115 | if((new_max_eff_prio == NULL) || | ||
1116 | ( (new_max_eff_prio != old_max_eff_prio) /* there was a change in eff prio */ && | ||
1117 | (effective_priority(t) == old_max_eff_prio) /* and owner had the old eff prio */) ) | ||
1118 | { | ||
1119 | // old_max_eff_prio > new_max_eff_prio | ||
1120 | |||
1121 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, INHERITED)) { | ||
1122 | TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", | ||
1123 | new_max_eff_prio->comm, new_max_eff_prio->pid, | ||
1124 | t->comm, t->pid, tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1125 | WARN_ON(1); | ||
1126 | } | ||
1127 | |||
1128 | decrease_priority_inheritance(t, new_max_eff_prio); | ||
1129 | } | ||
973 | } | 1130 | } |
974 | 1131 | ||
1132 | if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && tsk_rt(t)->inh_task != NULL) | ||
1133 | { | ||
1134 | WARN_ON(tsk_rt(t)->inh_task != NULL); | ||
1135 | TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", | ||
1136 | tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1137 | } | ||
1138 | |||
1139 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1140 | |||
975 | 1141 | ||
976 | /* check if there are jobs waiting for this resource */ | 1142 | /* check if there are jobs waiting for this resource */ |
977 | next = __waitqueue_remove_first(&mutex->wait); | 1143 | next = __waitqueue_remove_first(&mutex->wait); |
@@ -980,28 +1146,70 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
980 | mutex->owner = next; | 1146 | mutex->owner = next; |
981 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | 1147 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); |
982 | 1148 | ||
1149 | |||
983 | tsk_rt(next)->blocked_lock = NULL; | 1150 | tsk_rt(next)->blocked_lock = NULL; |
984 | nest_lock(l, next); // moves local_prio to trans_prio | 1151 | |
985 | 1152 | ||
986 | /* determine new hp_waiter if necessary */ | 1153 | /* determine new hp_waiter if necessary */ |
987 | if (next == mutex->hp_waiter) { | 1154 | if (next == mutex->hp_waiter) { |
1155 | |||
988 | TRACE_TASK(next, "was highest-prio waiter\n"); | 1156 | TRACE_TASK(next, "was highest-prio waiter\n"); |
989 | /* next has the highest priority --- it doesn't need to | 1157 | /* next has the highest priority --- it doesn't need to |
990 | * inherit. However, we need to make sure that the | 1158 | * inherit. However, we need to make sure that the |
991 | * next-highest priority in the queue is reflected in | 1159 | * next-highest priority in the queue is reflected in |
992 | * hp_waiter. */ | 1160 | * hp_waiter. */ |
993 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); | 1161 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); |
1162 | l->hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1163 | |||
994 | if (mutex->hp_waiter) | 1164 | if (mutex->hp_waiter) |
995 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | 1165 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); |
996 | else | 1166 | else |
997 | TRACE("no further waiters\n"); | 1167 | TRACE("no further waiters\n"); |
1168 | |||
1169 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1170 | |||
1171 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1172 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1173 | |||
1174 | binheap_add(&l->hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node); | ||
1175 | |||
1176 | //TRACE_TASK(next, "Heap After:\n"); | ||
1177 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1178 | |||
1179 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
998 | } else { | 1180 | } else { |
999 | /* Well, if next is not the highest-priority waiter, | 1181 | /* Well, if 'next' is not the highest-priority waiter, |
1000 | * then it ought to inherit the highest-priority | 1182 | * then it (probably) ought to inherit the highest-priority |
1001 | * waiter's priority. */ | 1183 | * waiter's priority. */ |
1002 | increase_priority_inheritance(next, mutex->hp_waiter); | 1184 | TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); |
1185 | |||
1186 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1187 | |||
1188 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1189 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1190 | |||
1191 | binheap_add(&l->hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | ||
1192 | struct litmus_lock, hp_binheap_node); | ||
1193 | |||
1194 | //TRACE_TASK(next, "Heap After:\n"); | ||
1195 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1196 | |||
1197 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
1198 | * because that update hasn't yet executed (update operation is | ||
1199 | * probably blocked on mutex->lock). So only inherit if the top of | ||
1200 | * 'next's top heap node is indeed the effective prio. of hp_waiter. | ||
1201 | * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
1202 | * since the effective priority of hp_waiter can change (and the | ||
1203 | * update has not made it to this lock).) | ||
1204 | */ | ||
1205 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->hp_waiter_eff_prio)) | ||
1206 | { | ||
1207 | increase_priority_inheritance(next, l->hp_waiter_eff_prio); | ||
1208 | } | ||
1209 | |||
1210 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1003 | } | 1211 | } |
1004 | 1212 | ||
1005 | /* wake up next */ | 1213 | /* wake up next */ |
1006 | wake_up_process(next); | 1214 | wake_up_process(next); |
1007 | } | 1215 | } |
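In the unlock path above, the releasing task deletes the lock's node from its hp_blocked_tasks heap, re-reads the top of the heap, and only then decides whether its inherited priority must drop; when ownership passes to the next waiter, the lock's node is inserted into that task's heap instead. Conceptually, a task's inherited priority is always the maximum over the highest-priority waiters of the locks it still holds. A simplified, self-contained sketch of that bookkeeping follows (illustrative names; a linear scan stands in for the binomial heap, and smaller integers mean higher priority):

```c
/* Illustrative bookkeeping only -- not LITMUS^RT code. */
#include <limits.h>
#include <stdio.h>

#define MAX_HELD 8
#define NO_PRIO  INT_MAX

struct toy_task {
	int base_prio;
	int held_waiter_prio[MAX_HELD]; /* hp-waiter prio per held lock */
	int nr_held;
};

/* Highest priority among waiters of all locks the task still holds. */
static int top_inherited(const struct toy_task *t)
{
	int best = NO_PRIO;
	for (int i = 0; i < t->nr_held; i++)
		if (t->held_waiter_prio[i] < best)
			best = t->held_waiter_prio[i];
	return best;
}

/* Effective priority after dropping slot 'idx' (i.e., unlocking it). */
static int unlock_slot(struct toy_task *t, int idx)
{
	t->held_waiter_prio[idx] = t->held_waiter_prio[--t->nr_held];
	int top = top_inherited(t);
	return (top < t->base_prio) ? top : t->base_prio;
}

int main(void)
{
	struct toy_task t = { .base_prio = 30,
	                      .held_waiter_prio = { 10, 25 }, .nr_held = 2 };
	printf("%d\n", unlock_slot(&t, 0)); /* drops prio-10 waiter -> 25 */
	printf("%d\n", unlock_slot(&t, 0)); /* last lock released -> base 30 */
	return 0;
}
```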
@@ -1011,11 +1219,178 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
1011 | } | 1219 | } |
1012 | 1220 | ||
1013 | out: | 1221 | out: |
1014 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1222 | raw_spin_unlock_irqrestore(&mutex->lock, flags); |
1223 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
1015 | 1224 | ||
1016 | return err; | 1225 | return err; |
1017 | } | 1226 | } |
1018 | 1227 | ||
1228 | |||
1229 | void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
1230 | struct task_struct* t, | ||
1231 | raw_spinlock_t* to_unlock, | ||
1232 | unsigned long irqflags) | ||
1233 | { | ||
1234 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1235 | |||
1236 | // relay-style locking | ||
1237 | raw_spin_lock(&mutex->lock); | ||
1238 | raw_spin_unlock(to_unlock); | ||
1239 | |||
1240 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | ||
1241 | struct task_struct *owner = mutex->owner; | ||
1242 | |||
1243 | struct task_struct *old_max_eff_prio; | ||
1244 | struct task_struct *new_max_eff_prio; | ||
1245 | |||
1246 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1247 | |||
1248 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1249 | |||
1250 | if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { | ||
1251 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); | ||
1252 | mutex->hp_waiter = t; | ||
1253 | } | ||
1254 | if(t == mutex->hp_waiter) { | ||
1255 | // reflect the decreased priority in the heap node. | ||
1256 | l->hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1257 | binheap_decrease(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1258 | } | ||
1259 | |||
1260 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1261 | |||
1262 | |||
1263 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1264 | // new_max_eff_prio > old_max_eff_prio holds. | ||
1265 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1266 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))) { | ||
1267 | |||
1268 | TRACE_TASK(new_max_eff_prio, "Propagating inheritance to holder of lock %d.\n", l->ident); | ||
1269 | |||
1270 | // beware: recursion | ||
1271 | nested_increase_priority_inheritance(owner, new_max_eff_prio, &mutex->lock, irqflags); // unlocks mutex->lock | ||
1272 | } | ||
1273 | else { | ||
1274 | TRACE_TASK(new_max_eff_prio, "Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); | ||
1275 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1276 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1277 | } | ||
1278 | } | ||
1279 | else { | ||
1280 | TRACE_TASK(mutex->owner, "No change in maxiumum effective priority.\n"); | ||
1281 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1282 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1283 | } | ||
1284 | } | ||
1285 | else { | ||
1286 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1287 | |||
1288 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1289 | if(still_blocked) { | ||
1290 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1291 | if(still_blocked->ops->propagate_increase_inheritance) { | ||
1292 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1293 | we know that task 't' has not released any locks behind us in this | ||
1294 | chain. Propagation just needs to catch up with task 't'. */ | ||
1295 | still_blocked->ops->propagate_increase_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1296 | } | ||
1297 | else { | ||
1298 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1299 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1300 | } | ||
1301 | } | ||
1302 | else { | ||
1303 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1304 | } | ||
1305 | } | ||
1306 | } | ||
1307 | |||
1308 | |||
1309 | void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
1310 | struct task_struct* t, | ||
1311 | raw_spinlock_t* to_unlock, | ||
1312 | unsigned long irqflags) | ||
1313 | { | ||
1314 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1315 | |||
1316 | // relay-style locking | ||
1317 | raw_spin_lock(&mutex->lock); | ||
1318 | raw_spin_unlock(to_unlock); | ||
1319 | |||
1320 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | ||
1321 | if(t == mutex->hp_waiter) { | ||
1322 | struct task_struct *owner = mutex->owner; | ||
1323 | |||
1324 | struct task_struct *old_max_eff_prio; | ||
1325 | struct task_struct *new_max_eff_prio; | ||
1326 | |||
1327 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1328 | |||
1329 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1330 | |||
1331 | binheap_delete(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1332 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
1333 | l->hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1334 | binheap_add(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node); | ||
1335 | |||
1336 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1337 | |||
1338 | if((old_max_eff_prio != new_max_eff_prio) && | ||
1339 | (effective_priority(owner) == old_max_eff_prio)) | ||
1340 | { | ||
1341 | // Need to set new effective_priority for owner | ||
1342 | |||
1343 | struct task_struct *decreased_prio; | ||
1344 | |||
1345 | TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", l->ident); | ||
1346 | |||
1347 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
1348 | TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", l->ident); | ||
1349 | decreased_prio = new_max_eff_prio; | ||
1350 | } | ||
1351 | else { | ||
1352 | TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", l->ident); | ||
1353 | decreased_prio = NULL; | ||
1354 | } | ||
1355 | |||
1356 | // beware: recursion | ||
1357 | nested_decrease_priority_inheritance(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | ||
1358 | } | ||
1359 | else { | ||
1360 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1361 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1362 | } | ||
1363 | } | ||
1364 | else { | ||
1365 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | ||
1366 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1367 | } | ||
1368 | } | ||
1369 | else { | ||
1370 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1371 | |||
1372 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1373 | if(still_blocked) { | ||
1374 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1375 | if(still_blocked->ops->propagate_decrease_inheritance) { | ||
1376 | /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1377 | we know that task 't' has not released any locks behind us in this | ||
1378 | chain. propagation just needs to catch up with task 't' */ | ||
1379 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1380 | } | ||
1381 | else { | ||
1382 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1383 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1384 | } | ||
1385 | } | ||
1386 | else { | ||
1387 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1388 | } | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | |||
1393 | |||
1019 | int gsnedf_rsm_mutex_close(struct litmus_lock* l) | 1394 | int gsnedf_rsm_mutex_close(struct litmus_lock* l) |
1020 | { | 1395 | { |
1021 | struct task_struct *t = current; | 1396 | struct task_struct *t = current; |
@@ -1024,11 +1399,13 @@ int gsnedf_rsm_mutex_close(struct litmus_lock* l)
1024 | 1399 | ||
1025 | int owner; | 1400 | int owner; |
1026 | 1401 | ||
1027 | spin_lock_irqsave(&mutex->wait.lock, flags); | 1402 | raw_spin_lock_irqsave(&mutex->lock, flags); |
1403 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | ||
1028 | 1404 | ||
1029 | owner = (mutex->owner == t); | 1405 | owner = (mutex->owner == t); |
1030 | 1406 | ||
1031 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1407 | raw_spin_unlock_irqrestore(&mutex->lock, flags); |
1408 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
1032 | 1409 | ||
1033 | if (owner) | 1410 | if (owner) |
1034 | gsnedf_rsm_mutex_unlock(l); | 1411 | gsnedf_rsm_mutex_unlock(l); |
@@ -1061,7 +1438,19 @@ static struct litmus_lock* gsnedf_new_rsm_mutex(void)
1061 | mutex->owner = NULL; | 1438 | mutex->owner = NULL; |
1062 | mutex->hp_waiter = NULL; | 1439 | mutex->hp_waiter = NULL; |
1063 | init_waitqueue_head(&mutex->wait); | 1440 | init_waitqueue_head(&mutex->wait); |
1441 | |||
1442 | |||
1443 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
1444 | { | ||
1445 | __raw_spin_lock_init(&mutex->lock, ((struct litmus_lock*)mutex)->cheat_lockdep, &((struct litmus_lock*)mutex)->key); | ||
1446 | } | ||
1447 | #else | ||
1448 | raw_spin_lock_init(&mutex->lock); | ||
1449 | #endif | ||
1450 | |||
1064 | mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; | 1451 | mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; |
1452 | |||
1453 | ((struct litmus_lock*)mutex)->hp_waiter_ptr = &mutex->hp_waiter; | ||
1065 | 1454 | ||
1066 | return &mutex->litmus_lock; | 1455 | return &mutex->litmus_lock; |
1067 | } | 1456 | } |
@@ -1223,7 +1612,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
1223 | sem->owner = NULL; | 1612 | sem->owner = NULL; |
1224 | 1613 | ||
1225 | /* we lose the benefit of priority inheritance (if any) */ | 1614 | /* we lose the benefit of priority inheritance (if any) */ |
1226 | if (tsk_rt(t)->eff_prio) | 1615 | if (tsk_rt(t)->inh_task) |
1227 | decrease_priority_inheritance(t, NULL); | 1616 | decrease_priority_inheritance(t, NULL); |
1228 | 1617 | ||
1229 | out: | 1618 | out: |