author     Glenn Elliott <gelliott@cs.unc.edu>  2013-04-06 17:04:32 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2013-04-06 17:04:32 -0400
commit     a4d63ac7ff059f81a0488d1cda4de16142508189 (patch)
tree       92f9bc1e154eec27145ba11c8498913bf1694116 /litmus/ikglp_lock.c
parent     92d266b64e0def8554ebece70318a5ae0050573a (diff)
IKGLP reject/reissue on budget exhaustion.

Added support to IKGLP to handle budget exhaustion
of a blocked waiter.

NOTE: CODE IS UNTESTED...
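
The reject/reissue path must know, at exhaustion time, which internal
structure a blocked request currently occupies. The patch threads this
through two pieces of bookkeeping: every enqueue path now stamps
wait->cur_q (and, for FIFO queues, wait->fq), and ikglp_lock() publishes
the wait record via tsk_rt(t)->blocked_lock_data before suspending. A
rough sketch of the state this implies (the real ikglp_wait_state_t is
defined in the IKGLP headers; the enum tag and exact field layout below
are illustrative assumptions — only the identifiers come from this patch):

    /* Illustrative sketch only -- not the actual header definition. */
    enum ikglp_wait_q {                 /* hypothetical tag name */
            IKGLP_INVL,                 /* not enqueued anywhere (dropped/invalid) */
            IKGLP_FQ,                   /* waiting on a replica's FIFO queue */
            IKGLP_PQ,                   /* parked on the priority queue */
            IKGLP_DONOR,                /* acting as a priority donor */
    };

    typedef struct ikglp_wait_state {
            struct task_struct *task;
            wait_queue_t fq_node;       /* node on fq->wait */
            struct fifo_queue *fq;      /* valid while cur_q == IKGLP_FQ */
            enum ikglp_wait_q cur_q;    /* which structure holds this request */
            /* ... heap nodes for the global list, donees, PQ, and donors ... */
    } ikglp_wait_state_t;

ikglp_budget_exhausted() then switches on cur_q to pick the matching
__drop_from_fq()/__drop_from_pq()/__drop_from_donor() helper before
reissuing the request.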
Diffstat (limited to 'litmus/ikglp_lock.c')
-rw-r--r--  litmus/ikglp_lock.c | 320
1 file changed, 241 insertions(+), 79 deletions(-)
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 160998f466ed..6d7ea24ce79b 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -427,7 +427,8 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t,
 // hp_waiter has decreased
 static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
                                                struct ikglp_semaphore *sem,
-                                               unsigned long flags)
+                                               unsigned long flags,
+                                               int budget_triggered)
 {
         struct task_struct *owner = fq->owner;
 
@@ -484,7 +485,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
                 }
 
                 // beware: recursion
-                litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags, 0);  // will unlock mutex->lock
+                litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags, budget_triggered);  // will unlock mutex->lock
         }
         else {
                 TRACE_TASK(owner, "No need to propagate priority decrease forward.\n");
@@ -623,19 +624,20 @@ static void ikglp_get_immediate(struct task_struct* t,
 
 
 static void __ikglp_enqueue_on_fq(struct ikglp_semaphore *sem,
-                                  struct fifo_queue* fq,
-                                  struct task_struct* t,
-                                  wait_queue_t *wait,
+                                  struct fifo_queue *fq,
+                                  ikglp_wait_state_t *wait,
                                   ikglp_heap_node_t *global_heap_node,
                                   ikglp_donee_heap_node_t *donee_heap_node)
 {
+        struct task_struct *t = wait->task;
+
         /* resource is not free => must suspend and wait */
         TRACE_TASK(t, "Enqueuing on fq %d.\n",
                    ikglp_get_idx(sem, fq));
 
-        init_waitqueue_entry(wait, t);
+        init_waitqueue_entry(&wait->fq_node, t);
 
-        __add_wait_queue_tail_exclusive(&fq->wait, wait);
+        __add_wait_queue_tail_exclusive(&fq->wait, &wait->fq_node);
 
         ++(fq->count);
         ++(sem->nr_in_fifos);
@@ -649,29 +651,25 @@ static void __ikglp_enqueue_on_fq(struct ikglp_semaphore *sem,
                 ikglp_add_global_list(sem, t, global_heap_node);
         }
         // update donor eligiblity list.
-        if(likely(donee_heap_node)) {
-                // if(binheap_is_in_heap(&donee_heap_node->node)) {
-                //         WARN_ON(1);
-                // }
+        if(likely(donee_heap_node))
                 ikglp_add_donees(sem, fq, t, donee_heap_node);
-        }
 
-        if(sem->shortest_fifo_queue == fq) {
+        if(sem->shortest_fifo_queue == fq)
                 sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq);
-        }
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-        if(sem->aff_obs) {
+        if(sem->aff_obs)
                 sem->aff_obs->ops->notify_enqueue(sem->aff_obs, fq, t);
-        }
 #endif
 
+        wait->cur_q = IKGLP_FQ;
+        wait->fq = fq;
+
         TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, fq));
 }
 
 
-static void ikglp_enqueue_on_fq(
-        struct ikglp_semaphore *sem,
+static void ikglp_enqueue_on_fq(struct ikglp_semaphore *sem,
                                 struct fifo_queue *fq,
                                 ikglp_wait_state_t *wait,
                                 unsigned long flags)
@@ -683,7 +681,7 @@ static void ikglp_enqueue_on_fq(
         INIT_BINHEAP_NODE(&wait->global_heap_node.node);
         INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
 
-        __ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node,
+        __ikglp_enqueue_on_fq(sem, fq, wait,
                               &wait->global_heap_node, &wait->donee_heap_node);
 
         ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags);  // unlocks sem->lock
@@ -699,6 +697,8 @@ static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem,
 
         binheap_add(&wait->pq_node.node, &sem->priority_queue,
                     ikglp_heap_node_t, node);
+
+        wait->cur_q = IKGLP_PQ;
 }
 
 static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem,
@@ -866,16 +866,18 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
                 unlock_fine_irqrestore(&sem->lock, flags);
         }
 
+        wait->cur_q = IKGLP_DONOR;
 
         // TRACE_CUR("donors After:\n");
         // print_donors(sem->donors.root, 1);
 }
 
+
 int ikglp_lock(struct litmus_lock* l)
 {
         struct task_struct* t = current;
         struct ikglp_semaphore *sem = ikglp_from_lock(l);
-        unsigned long flags = 0, real_flags;
+        unsigned long flags = 0, more_flags;
         struct fifo_queue *fq = NULL;
         int replica = -EINVAL;
 
@@ -888,13 +890,14 @@ int ikglp_lock(struct litmus_lock* l)
         if (!is_realtime(t))
                 return -EPERM;
 
+        memset(&wait, 0, sizeof(wait));
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
         dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
 
-        raw_spin_lock_irqsave(&sem->real_lock, real_flags);
-
         lock_global_irqsave(dgl_lock, flags);
+        raw_spin_lock_irqsave(&sem->real_lock, more_flags);
         lock_fine_irqsave(&sem->lock, flags);
 
         TRACE_CUR("Requesting a replica from lock %d.\n", l->ident);
@@ -916,8 +919,8 @@ int ikglp_lock(struct litmus_lock* l)
 
                 ikglp_get_immediate(t, fq, sem, flags);  // unlocks sem->lock
 
+                raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
                 unlock_global_irqrestore(dgl_lock, flags);
-                raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
                 goto acquired;
         }
         else {
@@ -959,8 +962,10 @@ int ikglp_lock(struct litmus_lock* l)
                 }
         }
 
+        tsk_rt(t)->blocked_lock_data = (unsigned long)&wait;
+
+        raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
         unlock_global_irqrestore(dgl_lock, flags);
-        raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
 
         TRACE_CUR("Suspending for replica.\n");
 
@@ -970,8 +975,11 @@ int ikglp_lock(struct litmus_lock* l)
 
         TS_LOCK_RESUME;
 
-        fq = ikglp_get_queue(sem, t);
-        BUG_ON(!fq);
+        fq = wait.fq;
+
+        BUG_ON(fq != ikglp_get_queue(sem, t));
+
+        tsk_rt(t)->blocked_lock_data = 0;
 
         replica = ikglp_get_idx(sem, fq);
 
@@ -988,6 +996,17 @@ acquired:
         return replica;
 }
 
+static void __drop_from_donor(struct ikglp_semaphore *sem,
+                              ikglp_wait_state_t *wait)
+{
+        BUG_ON(wait->cur_q != IKGLP_DONOR);
+
+        TRACE_TASK(wait->task, "is being dropped from donor heap.\n");
+
+        binheap_delete(&wait->node, &sem->donors);
+        wait->cur_q = IKGLP_INVL;
+}
+
 static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem,
                                    struct fifo_queue *fq,
                                    ikglp_wait_state_t *donor_info)
@@ -999,17 +1018,24 @@ static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem,
                   t->pid,
                   ikglp_get_idx(sem, fq));
 
-        binheap_delete(&donor_info->node, &sem->donors);
-
-        __ikglp_enqueue_on_fq(sem, fq, t,
-                              &donor_info->fq_node,
+        __drop_from_donor(sem, donor_info);
+        __ikglp_enqueue_on_fq(sem, fq, donor_info,
                               NULL,  // already in global_list, so pass null to prevent adding 2nd time.
                               &donor_info->donee_heap_node);
-
         // warning:
         // ikglp_update_owners_prio(t, fq, sem, flags) has not been called.
 }
 
+static void __drop_from_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait)
+{
+        BUG_ON(wait->cur_q != IKGLP_PQ);
+
+        TRACE_TASK(wait->task, "is being dropped from the PQ.\n");
+
+        binheap_delete(&wait->pq_node.node, &sem->priority_queue);
+        wait->cur_q = IKGLP_INVL;
+}
+
 static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem,
                                 struct fifo_queue *fq,
                                 ikglp_wait_state_t *wait)
@@ -1021,10 +1047,8 @@ static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem,
                   t->pid,
                   ikglp_get_idx(sem, fq));
 
-        binheap_delete(&wait->pq_node.node, &sem->priority_queue);
-
-        __ikglp_enqueue_on_fq(sem, fq, t,
-                              &wait->fq_node,
+        __drop_from_pq(sem, wait);
+        __ikglp_enqueue_on_fq(sem, fq, wait,
                               &wait->global_heap_node,
                               &wait->donee_heap_node);
         // warning:
@@ -1094,48 +1118,51 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
         return(NULL);
 }
 
-static void ikglp_steal_to_fq(struct ikglp_semaphore *sem,
-                              struct fifo_queue *fq,
-                              ikglp_wait_state_t *fq_wait)
+static void __drop_from_fq(struct ikglp_semaphore *sem,
+                           ikglp_wait_state_t *wait)
 {
-        struct task_struct *t = fq_wait->task;
-        struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq;
-
-        TRACE_CUR("FQ request %s/%d being moved to fq %d\n",
-                  t->comm,
-                  t->pid,
-                  ikglp_get_idx(sem, fq));
+        struct task_struct *t = wait->task;
+        struct fifo_queue *fq = wait->fq;
 
-        fq_wait->donee_heap_node.fq = fq;  // just to be safe
+        BUG_ON(wait->cur_q != IKGLP_FQ);
+        BUG_ON(!fq);
 
+        TRACE_TASK(t, "is being dropped from fq.\n");
 
-        __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node);
-        --(fq_steal->count);
+        __remove_wait_queue(&fq->wait, &wait->fq_node);
+        --(fq->count);
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
         if(sem->aff_obs) {
-                sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq_steal, t);
+                sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
         }
 #endif
 
-        if(t == fq_steal->hp_waiter) {
-                fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL);
+        if(t == fq->hp_waiter) {
+                fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
                 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
-                           ikglp_get_idx(sem, fq_steal),
-                           (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "null",
-                           (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : 0);
+                           ikglp_get_idx(sem, fq),
+                           (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
+                           (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
         }
 
-
         // Update shortest.
-        if(fq_steal->count < sem->shortest_fifo_queue->count) {
-                sem->shortest_fifo_queue = fq_steal;
-        }
+        if(fq->count < sem->shortest_fifo_queue->count)
+                sem->shortest_fifo_queue = fq;
+
+        wait->cur_q = IKGLP_INVL;
+}
+
+static void ikglp_steal_to_fq(struct ikglp_semaphore *sem,
+                              struct fifo_queue *fq,
+                              ikglp_wait_state_t *fq_wait)
+{
+        WARN_ON(fq_wait->fq != fq_wait->donee_heap_node.fq);
+        // __drop_from_fq(sem, fq_wait->donee_heap_node.fq, fq_wait);
+        __drop_from_fq(sem, fq_wait);
 
-        __ikglp_enqueue_on_fq(sem, fq, t,
-                              &fq_wait->fq_node,
-                              NULL,
-                              NULL);
+        fq_wait->donee_heap_node.fq = fq;  // just to be safe
+        __ikglp_enqueue_on_fq(sem, fq, fq_wait, NULL, NULL);
 
         // warning: We have not checked the priority inheritance of fq's owner yet.
 }
@@ -1192,7 +1219,7 @@ int ikglp_unlock(struct litmus_lock* l)
         raw_spinlock_t *dgl_lock;
 #endif
 
-        unsigned long flags = 0, real_flags;
+        unsigned long flags = 0, more_flags;
 
         int err = 0;
 
@@ -1203,17 +1230,17 @@
                 goto out;
         }
 
+        BUG_ON(l != tsk_rt(t)->outermost_lock);
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
         dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
-        raw_spin_lock_irqsave(&sem->real_lock, real_flags);
-
-        lock_global_irqsave(dgl_lock, flags);  // TODO: Push this deeper
+        lock_global_irqsave(dgl_lock, flags);
+        raw_spin_lock_irqsave(&sem->real_lock, more_flags);
         lock_fine_irqsave(&sem->lock, flags);
 
         TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq));
 
-
         // Remove 't' from the heaps, but data in nodes will still be good.
         ikglp_del_global_list(sem, t, &fq->global_heap_node);
         binheap_delete(&fq->donee_heap_node.node, &sem->donees);
@@ -1435,7 +1462,7 @@
                                    (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "null",
                                    (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : 0);
 
-                        ikglp_refresh_owners_prio_decrease(other_fq, sem, flags);  // unlocks sem->lock. reacquire it.
+                        ikglp_refresh_owners_prio_decrease(other_fq, sem, flags, 0);  // unlocks sem->lock. reacquire it.
                         lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
                 }
         }
@@ -1445,7 +1472,7 @@
                           ikglp_get_idx(sem, to_steal));
 
                 if(need_steal_prio_reeval) {
-                        ikglp_refresh_owners_prio_decrease(to_steal, sem, flags);  // unlocks sem->lock. reacquire it.
+                        ikglp_refresh_owners_prio_decrease(to_steal, sem, flags, 0);  // unlocks sem->lock. reacquire it.
                         lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
                 }
         }
@@ -1481,6 +1508,7 @@ int ikglp_unlock(struct litmus_lock* l)
                 }
         }
 
+
 wake_kludge:
         if(waitqueue_active(&fq->wait))
         {
@@ -1509,7 +1537,6 @@ wake_kludge:
 
                 /* determine new hp_waiter if necessary */
                 if (next == fq->hp_waiter) {
-
                         TRACE_TASK(next, "was highest-prio waiter\n");
                         /* next has the highest priority --- it doesn't need to
                          * inherit.  However, we need to make sure that the
@@ -1595,10 +1622,9 @@
         }
 
         unlock_fine_irqrestore(&sem->lock, flags);
+        raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
         unlock_global_irqrestore(dgl_lock, flags);
 
-        raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
-
         TRACE_CUR("done with freeing replica.\n");
 
 out:
@@ -1608,7 +1634,145 @@ out:
 
 void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
 {
-        TRACE_TASK(t, "TODO!\n");
+        /*
+         * PRE: (1) Our deadline has already been postponed.
+         *      (2) The DGL lock is already held if DGLs are supported.
+         *
+         * Exhaustion Response: Remove the request from the lock and re-issue it.
+         *
+         * step 1: first check that we are actually blocked.
+         * step 2: remove our request from ANY data structure:
+         *         - donor heap
+         *         - pq
+         *         - fq
+         * step 3: reissue the request
+         */
+
+        struct ikglp_semaphore *sem = ikglp_from_lock(l);
+        struct litmus_lock* blocked_lock;
+        unsigned long flags = 0, more_flags;
+
+        raw_spin_lock_irqsave(&sem->real_lock, more_flags);
+        lock_fine_irqsave(&sem->lock, flags);
+
+        blocked_lock = tsk_rt(t)->blocked_lock;
+        if (blocked_lock == l) {
+                ikglp_wait_state_t *wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
+                ikglp_donee_heap_node_t *donee_info;
+                struct task_struct *donee;
+                struct fifo_queue *donee_fq;
+                BUG_ON(!wait);
+
+                /* drop the request from the proper IKGLP data structure and re-eval
+                 * priority relations */
+                switch(wait->cur_q)
+                {
+                case IKGLP_PQ:
+                        // No one inherits from waiters in PQ. Just drop the request.
+                        __drop_from_pq(sem, wait);
+                        break;
+                case IKGLP_FQ:
+                        __drop_from_fq(sem, wait);
+                        ikglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq, sem, flags, 1);  // unlocks sem->lock. reacquire it.
+                        lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+                        break;
+                case IKGLP_DONOR:
+                        __drop_from_donor(sem, wait);
+                        /* update donee */
+                        donee_info = wait->donee_info;
+                        donee_info->donor_info = NULL;  // clear the cross-link
+                        binheap_decrease(&donee_info->node, &sem->donees);
+
+                        donee = donee_info->task;
+                        donee_fq = donee_info->fq;
+                        if (donee == donee_fq->owner) {
+                                TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
+                                           donee->comm, donee->pid,
+                                           ikglp_get_idx(sem, donee_fq));
+                                ikglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node, donee_fq, sem, flags);  // unlocks sem->lock. reacquire it.
+                                lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+                        }
+                        else {
+                                TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n",
+                                           donee->comm, donee->pid,
+                                           ikglp_get_idx(sem, donee_fq));
+
+                                ikglp_remove_donation_from_fq_waiter(donee, &wait->prio_donation.hp_binheap_node);
+                                if(donee == donee_fq->hp_waiter) {
+                                        TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
+                                                   donee->comm, donee->pid,
+                                                   ikglp_get_idx(sem, donee_fq));
+
+                                        donee_fq->hp_waiter = ikglp_find_hp_waiter(donee_fq, NULL);
+                                        TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
+                                                   ikglp_get_idx(sem, donee_fq),
+                                                   (donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null",
+                                                   (donee_fq->hp_waiter) ? donee_fq->hp_waiter->pid : 0);
+
+                                        ikglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1);  // unlocks sem->lock. reacquire it.
+                                        lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+                                }
+                        }
+
+                        break;
+                default:
+                        BUG();
+                }
+
+                BUG_ON(wait->cur_q != IKGLP_INVL);  /* state should now be invalid */
+
+                /* now re-issue the request */
+
+                TRACE_TASK(t, "Reissuing a request for replica from lock %d.\n", l->ident);
+
+                if(sem->nr_in_fifos < sem->max_in_fifos) {
+                        struct fifo_queue *fq;
+
+                        // enqueue somewhere
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+                        fq = (sem->aff_obs) ?
+                                sem->aff_obs->ops->advise_enqueue(sem->aff_obs, t) :
+                                sem->shortest_fifo_queue;
+#else
+                        fq = sem->shortest_fifo_queue;
+#endif
+                        TRACE_TASK(t, "is going to an FQ.\n");
+                        /* if this were true, then we should not have been blocked */
+                        BUG_ON(fq->count == 0);
+                        ikglp_enqueue_on_fq(sem, fq, wait, flags);  // unlocks sem->lock
+                }
+                else if(litmus->__compare(ikglp_mth_highest(sem), BASE, t, BASE)) {
+                        TRACE_TASK(t, "is going to PQ.\n");
+                        // enqueue on PQ
+                        ikglp_enqueue_on_pq(sem, wait);
+                        unlock_fine_irqrestore(&sem->lock, flags);
+                }
+                else {
+                        // enqueue as donor
+                        TRACE_TASK(t, "is going to donor heap.\n");
+                        ikglp_enqueue_on_donor(sem, wait, flags);  // unlocks sem->lock
+                }
+
+                raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
+        }
+        else if (blocked_lock) {
+                unlock_fine_irqrestore(&sem->lock, flags);
+                raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
+
+                TRACE_TASK(t, "is blocked, but not on IKGLP. Redirecting...\n");
+                if(blocked_lock->ops->supports_budget_exhaustion) {
+                        TRACE_TASK(t, "Lock %d supports budget exhaustion.\n",
+                                   blocked_lock->ident);
+                        blocked_lock->ops->budget_exhausted(blocked_lock, t);
+                }
+        }
+        else {
+                TRACE_TASK(t, "appears to be no longer blocked.\n");
+                unlock_fine_irqrestore(&sem->lock, flags);
+                raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
+        }
+
+        return;
 }
 
 void ikglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t)
@@ -2593,19 +2757,17 @@ void gpu_ikglp_notify_dequeue(struct ikglp_affinity* aff, struct fifo_queue* fq,
 int gpu_ikglp_notify_exit(struct ikglp_affinity* aff, struct task_struct* t)
 {
         struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
-        unsigned long flags = 0, real_flags;
+        unsigned long flags = 0, more_flags;
         int aff_rsrc;
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
-        raw_spinlock_t *dgl_lock;
-
-        dgl_lock = litmus->get_dgl_spinlock(t);
+        raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
 
         if (tsk_rt(t)->last_gpu < 0)
                 return 0;
 
-        raw_spin_lock_irqsave(&sem->real_lock, real_flags);
         lock_global_irqsave(dgl_lock, flags);
+        raw_spin_lock_irqsave(&sem->real_lock, more_flags);
         lock_fine_irqsave(&sem->lock, flags);
 
         // decrement affinity count on old GPU
@@ -2618,8 +2780,8 @@ int gpu_ikglp_notify_exit(struct ikglp_affinity* aff, struct task_struct* t)
         }
 
         unlock_fine_irqrestore(&sem->lock, flags);
+        raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
         unlock_global_irqrestore(dgl_lock, flags);
-        raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
 
         return 0;
 }
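
A note on the locking change threaded through this patch: sem->real_lock
is now acquired after the global DGL lock at every call site (and
real_flags is renamed more_flags to match). The resulting nesting
discipline, sketched from the lock/unlock sites above (error handling
elided; this is a summary, not code from the patch):

    lock_global_irqsave(dgl_lock, flags);                /* outermost: DGL lock */
    raw_spin_lock_irqsave(&sem->real_lock, more_flags);  /* per-semaphore lock */
    lock_fine_irqsave(&sem->lock, flags);                /* innermost: fine-grained state */

    /* ... manipulate IKGLP queues/heaps ... */

    unlock_fine_irqrestore(&sem->lock, flags);
    raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
    unlock_global_irqrestore(dgl_lock, flags);

This ordering matters for the new handler: ikglp_budget_exhausted() is
entered with the DGL lock already held (per its preconditions) and then
takes real_lock itself, which would invert the old real_lock-before-DGL
order used by ikglp_lock()/ikglp_unlock() and risk deadlock.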