author		Glenn Elliott <gelliott@cs.unc.edu>	2012-04-04 23:05:47 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-04-04 23:05:47 -0400
commit		0ccecdaf12334b2241ee5185b04eda4f91f95fe2 (patch)
tree		65bf14f17db14e42c57aa3d3a591bf8dc95e2d66
parent		d2f4875d7a183cc3c95c27c193af2c0cd1d1c555 (diff)
Untested implementation of IKGLP.
I don't like coding so much w/o testing, but it's sort of hard to do without both lock() and unlock().
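A minimal user-space smoke test for when both paths are in place — a sketch only, assuming liblitmus-style wrappers (open_ikglp_sem, litmus_lock, and litmus_unlock are placeholder names here, not part of this patch):

	/* Hypothetical test: grab one of the k replicas, hold it briefly, release. */
	int od = open_ikglp_sem(fd, 0 /* name */, k /* nr_replicas */);  /* placeholder API */
	if (od >= 0) {
		litmus_lock(od);    /* blocks via FIFO queue, PQ, or donor, per IKGLP */
		do_work();          /* critical section on the acquired replica */
		litmus_unlock(od);  /* hands the replica to the next waiter */
	}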
-rw-r--r--	include/litmus/binheap.h	|   12
-rw-r--r--	include/litmus/locking.h	|   17
-rw-r--r--	include/litmus/rt_param.h	|    2
-rw-r--r--	litmus/edf_common.c		|    8
-rw-r--r--	litmus/litmus.c			|    4
-rw-r--r--	litmus/locking.c		|    7
-rw-r--r--	litmus/sched_gsn_edf.c		| 1339
7 files changed, 1277 insertions(+), 112 deletions(-)
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
index ce5539d72f4d..87dbaee9f27c 100644
--- a/include/litmus/binheap.h
+++ b/include/litmus/binheap.h
@@ -166,6 +166,18 @@ static inline int binheap_is_in_heap(struct binheap_node *node)
 	return (node->parent != BINHEAP_POISON);
 }
 
+static inline int binheap_is_in_this_heap(struct binheap_node *node, struct binheap_handle* heap)
+{
+	if(!binheap_is_in_heap(node)) {
+		return 0;
+	}
+
+	while(node->parent != NULL) {
+		node = node->parent;
+	}
+
+	return (node == heap->root);
+}
 
 /* Update the node reference pointers. Same logic as Litmus binomial heap. */
 static inline void __update_ref(struct binheap_node *parent,
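The new binheap_is_in_this_heap() helper walks parent pointers up to the root, so testing membership in a specific heap costs O(log n). A usage sketch (not part of the patch) mirroring how ikglp_del_global_list() below decides which global list a node sits in:

	static void remove_from_either_heap(struct binheap_node *n,
					    struct binheap_handle *top_m,
					    struct binheap_handle *not_top_m)
	{
		if (binheap_is_in_this_heap(n, top_m))
			binheap_delete(n, top_m);      /* n's root matched top_m */
		else if (binheap_is_in_heap(n))
			binheap_delete(n, not_top_m);  /* must be in the other heap */
	}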
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 57d2abfe2a12..e0c13f4c31e5 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -5,6 +5,17 @@
 
 struct litmus_lock_ops;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+struct nested_info
+{
+	struct litmus_lock *lock;
+	struct task_struct *hp_waiter_eff_prio;
+	struct task_struct **hp_waiter_ptr;
+	struct binheap_node hp_binheap_node;
+};
+#endif
+
+
 /* Generic base struct for LITMUS^RT userspace semaphores.
  * This structure should be embedded in protocol-specific semaphores.
  */
@@ -13,11 +24,9 @@ struct litmus_lock {
 	int type;
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
-	struct task_struct *hp_waiter_eff_prio;
-	struct task_struct **hp_waiter_ptr;
-	struct binheap_node hp_binheap_node;
-
 	int ident;
+
+	struct nested_info nest;
 
 //#ifdef CONFIG_DEBUG_SPINLOCK
 	char cheat_lockdep[2];
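Pulling the nesting fields out into an embeddable struct nested_info lets objects that are not full litmus_lock instances (e.g. the IKGLP's per-replica FIFO queues below) participate in a task's hp_blocked_tasks heap. A sketch of the intended wiring, mirroring create_generic_lock() and gsnedf_new_rsm_mutex() later in this patch (my_mutex is a hypothetical protocol-specific lock):

	struct my_mutex {
		struct litmus_lock litmus_lock;  /* embedded generic lock */
		struct task_struct *hp_waiter;   /* protocol-local hp waiter */
	};

	static void my_mutex_init_nesting(struct my_mutex *m)
	{
		/* must point at the protocol's hp_waiter field before
		 * create_generic_lock() runs; it WARNs on a NULL pointer. */
		m->litmus_lock.nest.hp_waiter_ptr = &m->hp_waiter;
	}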
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 3a961005e23b..307de81db0fd 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -146,7 +146,7 @@ struct rt_param {
 	struct litmus_lock* blocked_lock;
 
 
-	struct task_struct* doner;
+	//void* donor_data;
 #endif
 
 
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 7a3a9a02a703..f275610bf052 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -111,8 +111,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b)
 {
-	struct litmus_lock *l_a = (struct litmus_lock *)binheap_entry(a, struct litmus_lock, hp_binheap_node);
-	struct litmus_lock *l_b = (struct litmus_lock *)binheap_entry(b, struct litmus_lock, hp_binheap_node);
+	struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node);
+	struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node);
 
 	return __edf_higher_prio(l_a->hp_waiter_eff_prio, INHERITED, l_b->hp_waiter_eff_prio, INHERITED);
 }
@@ -124,8 +124,8 @@ int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
 
 int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
 {
-	struct litmus_lock *l_a = (struct litmus_lock *)binheap_entry(a, struct litmus_lock, hp_binheap_node);
-	struct litmus_lock *l_b = (struct litmus_lock *)binheap_entry(b, struct litmus_lock, hp_binheap_node);
+	struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node);
+	struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node);
 
 	return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE, l_b->hp_waiter_eff_prio, BASE);
 }
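With the comparators retargeted at struct nested_info, a task's inheritance heap orders the locks (and, for the IKGLP, donations) it currently holds by the priority of each one's highest-priority waiter. A sketch of the intended setup and query, assuming INIT_BINHEAP_HANDLE is the binheap init macro and using top_priority() as defined in sched_gsn_edf.c below:

	/* Max-heap of nested_info nodes keyed on each lock's hp waiter. */
	INIT_BINHEAP_HANDLE(&tsk_rt(t)->hp_blocked_tasks, edf_max_heap_order);

	binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
		    struct nested_info, hp_binheap_node);

	/* The top entry drives t's inherited priority. */
	struct task_struct *max = top_priority(&tsk_rt(t)->hp_blocked_tasks);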
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3dcefe7a08d6..68285b319160 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -317,7 +317,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	WARN_ON(p->rt_param.blocked_lock);
 	WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks));
 
-	WARN_ON(p->rt_param.doner);
+	//WARN_ON(p->rt_param.donor_data);
 #endif
 
 	/* Cleanup everything else. */
@@ -493,7 +493,7 @@ void litmus_exec(void)
 
 	if (is_realtime(p)) {
 		WARN_ON(p->rt_param.inh_task);
-		WARN_ON(p->rt_param.doner);
+		//WARN_ON(p->rt_param.donor_data);
 		if (tsk_rt(p)->ctrl_page) {
 			free_page((unsigned long) tsk_rt(p)->ctrl_page);
 			tsk_rt(p)->ctrl_page = NULL;
diff --git a/litmus/locking.c b/litmus/locking.c
index 78d609409536..19ed5a8e16e9 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -42,10 +42,11 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 	err = litmus->allocate_lock(&lock, type, arg);
 	if (err == 0) {
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
-		lock->hp_waiter_eff_prio = NULL;
+		lock->nest.lock = lock;
+		lock->nest.hp_waiter_eff_prio = NULL;
 
-		INIT_BINHEAP_NODE(&lock->hp_binheap_node);
-		WARN_ON(!(lock->hp_waiter_ptr));
+		INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node);
+		WARN_ON(!(lock->nest.hp_waiter_ptr));
 
 		lock->ident = atomic_inc_return(&lock_id_gen);
 
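The nest.lock back-pointer set here is what lets generic heap code recover the owning lock from an embedded nested_info, as print_hp_waiters() does below:

	struct nested_info *nest = binheap_entry(n, struct nested_info, hp_binheap_node);
	struct litmus_lock *l = nest->lock;  /* back-pointer installed above */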
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 10e14613655b..7e50445ad1a9 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -793,7 +793,8 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
 
 void print_hp_waiters(struct binheap_node* n, int depth)
 {
-	struct litmus_lock* l;
+	struct litmus_lock *l;
+	struct nested_info *nest;
 	char padding[81] = " ";
 	struct task_struct *hp = NULL;
 	struct task_struct *hp_eff = NULL;
@@ -805,20 +806,21 @@ void print_hp_waiters(struct binheap_node* n, int depth)
 		return;
 	}
 
-	l = binheap_entry(n, struct litmus_lock, hp_binheap_node);
+	nest = binheap_entry(n, struct nested_info, hp_binheap_node);
+	l = nest->lock;
 
 	if(depth*2 <= 80)
 		padding[depth*2] = '\0';
 
-	if(*(l->hp_waiter_ptr)) {
-		hp = *(l->hp_waiter_ptr);
+	if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) {
+		hp = *(nest->hp_waiter_ptr);
 
 		if(tsk_rt(hp)->inh_task) {
 			hp_eff = tsk_rt(hp)->inh_task;
 		}
 	}
 
-	node_prio = l->hp_waiter_eff_prio;
+	node_prio = nest->hp_waiter_eff_prio;
 
 	TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n",
 		padding,
@@ -844,7 +846,9 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t
 {
 	struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;
 
-	increase_priority_inheritance(t, prio_inh);  // increase our prio.
+	if(tsk_rt(t)->inh_task != prio_inh) {  // shield redundant calls.
+		increase_priority_inheritance(t, prio_inh);  // increase our prio.
+	}
 
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);  // unlock t's heap.
 
@@ -943,7 +947,7 @@ struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
 
 static inline struct task_struct* top_priority(struct binheap_handle* handle) {
 	if(!binheap_empty(handle)) {
-		return (struct task_struct*)(binheap_top_entry(handle, struct litmus_lock, hp_binheap_node)->hp_waiter_eff_prio);
+		return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio);
 	}
 	return NULL;
 }
@@ -999,9 +1003,9 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
 			old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
 			mutex->hp_waiter = t;
-			l->hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
+			l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
 
-			binheap_decrease(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
+			binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
 
 			//TRACE_TASK(owner, "Heap After:\n");
 			//print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0);
@@ -1066,7 +1070,7 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
 		mutex->owner = t;
 
 		raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
-		binheap_add(&l->hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node);
+		binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
 		raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
 
 		raw_spin_unlock_irqrestore(&mutex->lock, flags);
@@ -1104,9 +1108,9 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
 	//TRACE_TASK(t, "Heap Before:\n");
 	//print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0);
 
-	//old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node)->hp_waiter_ptr);
+	//old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node)->hp_waiter_ptr);
 	old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
-	binheap_delete(&l->hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks);
+	binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks);
 
 	//raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
@@ -1163,7 +1167,7 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
 		 * next-highest priority in the queue is reflected in
 		 * hp_waiter. */
 		mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next);
-		l->hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL;
+		l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL;
 
 		if (mutex->hp_waiter)
 			TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
@@ -1175,7 +1179,7 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
 			//TRACE_TASK(next, "Heap Before:\n");
 			//print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
 
-			binheap_add(&l->hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node);
+			binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
 
 			//TRACE_TASK(next, "Heap After:\n");
 			//print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
@@ -1192,8 +1196,8 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
 			//TRACE_TASK(next, "Heap Before:\n");
 			//print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
 
-			binheap_add(&l->hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks,
-					struct litmus_lock, hp_binheap_node);
+			binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks,
+					struct nested_info, hp_binheap_node);
 
 			//TRACE_TASK(next, "Heap After:\n");
 			//print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
@@ -1206,9 +1210,9 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
 			 * since the effective priority of hp_waiter can change (and the
 			 * update has not made it to this lock).)
 			 */
-			if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->hp_waiter_eff_prio))
+			if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio))
 			{
-				increase_priority_inheritance(next, l->hp_waiter_eff_prio);
+				increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio);
 			}
 
 			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
@@ -1257,8 +1261,8 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 		}
 		if(t == mutex->hp_waiter) {
 			// reflect the decreased priority in the heap node.
-			l->hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
-			binheap_decrease(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
+			l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
+			binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
 		}
 
 		new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
@@ -1332,10 +1336,10 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 
 	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
-	binheap_delete(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
+	binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
 	mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL);
-	l->hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL;
-	binheap_add(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node);
+	l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL;
+	binheap_add(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
 
 	new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
@@ -1454,7 +1458,7 @@ static struct litmus_lock* gsnedf_new_rsm_mutex(void)
 
 	mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops;
 
-	((struct litmus_lock*)mutex)->hp_waiter_ptr = &mutex->hp_waiter;
+	((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;
 
 	return &mutex->litmus_lock;
 }
@@ -1469,78 +1473,139 @@ static struct litmus_lock* gsnedf_new_rsm_mutex(void)
 
 /* ******************** IKGLP ********************** */
 
-/* struct for semaphore with priority inheritance */
-struct fifo_queue
-{
-	wait_queue_head_t wait;
-	struct task_struct* owner;
-	struct task_struct* hp_waiter;
-	int count;  /* number of waiters + holder */
-};
 
-struct ikglp_donor_node
+typedef struct ikglp_heap_node
 {
-	struct task_struct *donor;
-	struct task_struct *donee;
+	struct task_struct *task;
 	struct binheap_node node;
-};
+} ikglp_heap_node_t;
 
-static int ikglp_doner_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
 {
-	struct ikglp_donor_node *d_a = (struct ikglp_donor_node*)binheap_entry(a, struct donor_node, node);
-	struct ikglp_donor_node *d_b = (struct ikglp_donor_node*)binheap_entry(b, struct donor_node, node);
+	ikglp_heap_node_t *d_a = (ikglp_heap_node_t*)binheap_entry(a, ikglp_heap_node_t, node);
+	ikglp_heap_node_t *d_b = (ikglp_heap_node_t*)binheap_entry(b, ikglp_heap_node_t, node);
 
-	return __edf_higher_prio(d_a->donor, BASE, d_b->donor, BASE);
+	return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE);
 }
 
+static int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+{
+	ikglp_heap_node_t *d_a = (ikglp_heap_node_t*)binheap_entry(a, ikglp_heap_node_t, node);
+	ikglp_heap_node_t *d_b = (ikglp_heap_node_t*)binheap_entry(b, ikglp_heap_node_t, node);
+
+	return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE);
+}
+
+struct fifo_queue;
+struct ikglp_wait_state;
 
-struct ikglp_heap_node
+typedef struct ikglp_donee_heap_node
 {
 	struct task_struct *task;
+	struct fifo_queue *fq;
+	struct ikglp_wait_state *donor_info;  // cross-linked with ikglp_wait_state_t of donor
+
 	struct binheap_node node;
-};
+} ikglp_donee_heap_node_t;
 
-static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+
+// Maintains the state of a request as it goes through the IKGLP
+typedef struct ikglp_wait_state {
+	struct task_struct *task;  // pointer back to the requesting task
+
+	// Data for while waiting in FIFO Queue
+	wait_queue_t fq_node;
+	ikglp_heap_node_t global_heap_node;
+	ikglp_donee_heap_node_t donee_heap_node;
+
+	// Data for while waiting in PQ
+	ikglp_heap_node_t pq_node;
+
+	// Data for while waiting as a donor
+	ikglp_donee_heap_node_t *donee_info;  // cross-linked with donee's ikglp_donee_heap_node_t
+	struct nested_info prio_donation;
+	struct binheap_node node;
+} ikglp_wait_state_t;
+
+
+static int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
 {
-	struct ikglp_heap_node *d_a = (struct ikglp_heap_node*)binheap_entry(a, struct ikglp_heap_node, node);
-	struct ikglp_heap_node *d_b = (struct ikglp_heap_node*)binheap_entry(b, struct ikglp_heap_node, node);
+	ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node);
+	ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node);
 
 	return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE);
 }
 
-static int ikglp_edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
+
+static int ikglp_edf_min_heap_donee_order(struct binheap_node *a, struct binheap_node *b)
 {
-	struct ikglp_heap_node *d_a = (struct ikglp_heap_node*)binheap_entry(a, struct ikglp_heap_node, node);
-	struct ikglp_heap_node *d_b = (struct ikglp_heap_node*)binheap_entry(b, struct ikglp_heap_node, node);
+	struct task_struct *prio_a, *prio_b;
+
+	ikglp_donee_heap_node_t *d_a = (ikglp_donee_heap_node_t*)binheap_entry(a, ikglp_donee_heap_node_t, node);
+	ikglp_donee_heap_node_t *d_b = (ikglp_donee_heap_node_t*)binheap_entry(b, ikglp_donee_heap_node_t, node);
+
+	if(!d_a->donor_info) {
+		prio_a = d_a->task;
+	}
+	else {
+		prio_a = d_a->donor_info->task;
+		BUG_ON(d_a->task != d_a->donor_info->donee_info->task);
+	}
+
+	if(!d_b->donor_info) {
+		prio_b = d_b->task;
+	}
+	else {
+		prio_b = d_b->donor_info->task;
+		BUG_ON(d_b->task != d_b->donor_info->donee_info->task);
+	}
 
 	// note reversed order
-	return edf_higher_prio(d_b->task, d_a->task);
+	return __edf_higher_prio(prio_b, BASE, prio_a, BASE);
 }
 
 
+/* struct for a FIFO replica queue with priority inheritance */
+struct fifo_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+
+	// used for bookkeeping
+	ikglp_heap_node_t global_heap_node;
+	ikglp_donee_heap_node_t donee_heap_node;
+
+	struct task_struct* hp_waiter;
+	int count;  /* number of waiters + holder */
+
+	struct nested_info nest;
+};
+
+
 struct ikglp_semaphore
 {
 	struct litmus_lock litmus_lock;
 
 	raw_spinlock_t lock;
+	raw_spinlock_t real_lock;
 
 	int nr_replicas;  // AKA k
-	int nr_queued;  // num requests in fifos + holders
-	int max_nr_queued;  // capacity of fifos + holders
+	int m;
+
 	int max_fifo_len;  // max len of a fifo queue
 
-	struct binheap_handle global_heap;  // max-heap, base prio
-	struct ikglp_heap_node *global_holder_nodes;  // holders need their nodes allocated dynamically
-
-	struct binheap_handle fifos_heap;  // min-heap, effective prio
-	struct ikglp_heap_node *fifos_holder_nodes;  // holders need their nodes allocated dynamically
-
+	struct binheap_handle top_m;  // min heap, base prio
+	int top_m_size;  // number of nodes in top_m
+
+	struct binheap_handle not_top_m;  // max heap, base prio
+
+	struct binheap_handle donees;  // min-heap, base prio
 	struct fifo_queue *shortest_fifo_queue;  // pointer to shortest fifo queue
 
 	/* data structures for holding requests */
 	struct fifo_queue *fifo_queues;  // array nr_replicas in length
 	struct binheap_handle priority_queue;  // max-heap, base prio
-	struct binheap_handle doners;  // max-heap, base prio
+	struct binheap_handle donors;  // max-heap, base prio
 };
 
 static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
@@ -1583,7 +1648,7 @@ static struct task_struct* ikglp_find_hp_waiter(
 	return found;
 }
 
-static inline struct fifo_queue* ikglp_find_shortest(
+static struct fifo_queue* ikglp_find_shortest(
 	struct ikglp_semaphore *sem,
 	struct fifo_queue *search_start)
 {
@@ -1607,21 +1672,1108 @@ static inline struct fifo_queue* ikglp_find_shortest(
 	return(shortest);
 }
 
+static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem)
+{
+	return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task;
+}
+
+void print_global_list(struct binheap_node* n, int depth)
+{
+	ikglp_heap_node_t *global_heap_node;
+	char padding[81] = " ";
+
+	if(n == NULL) {
+		TRACE_CUR("+-> %p\n", NULL);
+		return;
+	}
+
+	global_heap_node = binheap_entry(n, ikglp_heap_node_t, node);
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+	TRACE_CUR("%s+-> %s/%d\n",
+		padding,
+		global_heap_node->task->comm,
+		global_heap_node->task->pid);
+
+	if(n->left) print_global_list(n->left, depth+1);
+	if(n->right) print_global_list(n->right, depth+1);
+}
+
+
+static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node)
+{
+	node->task = t;
+	INIT_BINHEAP_NODE(&node->node);
+
+	if(sem->top_m_size < sem->m) {
+		TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", t->comm, t->pid);
+		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+
+		binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node);
+		++(sem->top_m_size);
+
+		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+	}
+	else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) {
+		ikglp_heap_node_t *evicted = binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node);
+
+		TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n",
+			t->comm, t->pid,
+			evicted->task->comm, evicted->task->pid);
+
+		TRACE_CUR("Not-Top-M Before:\n");
+		print_global_list(sem->not_top_m.root, 1);
+		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+
+		binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node);
+		INIT_BINHEAP_NODE(&evicted->node);
+		binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node);
+
+		binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node);
+
+		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+	}
+	else {
+		TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", t->comm, t->pid);
+		TRACE_CUR("Not-Top-M Before:\n");
+		print_global_list(sem->not_top_m.root, 1);
+
+		binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node);
+
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+	}
+}
+
+
+static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node)
+{
+	BUG_ON(!binheap_is_in_heap(&node->node));
+
+	TRACE_CUR("Removing %s/%d from global list.\n", t->comm, t->pid);
+
+	if(binheap_is_in_this_heap(&node->node, &sem->top_m)) {
+		TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid);
+
+		TRACE_CUR("Not-Top-M Before:\n");
+		print_global_list(sem->not_top_m.root, 1);
+		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+
+		binheap_delete(&node->node, &sem->top_m);
+
+		if(!binheap_empty(&sem->not_top_m)) {
+			ikglp_heap_node_t *promoted = binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node);
+
+			TRACE_CUR("Promoting %s/%d to top-m\n", promoted->task->comm, promoted->task->pid);
+
+			binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node);
+			INIT_BINHEAP_NODE(&promoted->node);
+
+			binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node);
+		}
+		else {
+			TRACE_CUR("No one to promote to top-m.\n");
+			--(sem->top_m_size);
+		}
+
+		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+	}
+	else {
+		TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid);
+		TRACE_CUR("Not-Top-M Before:\n");
+		print_global_list(sem->not_top_m.root, 1);
+
+		binheap_delete(&node->node, &sem->not_top_m);
+
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+	}
+}
+
+
+void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth)
+{
+	ikglp_donee_heap_node_t *donee_node;
+	char padding[81] = " ";
+	struct task_struct* donor = NULL;
+
+	if(n == NULL) {
+		TRACE_CUR("+-> %p\n", NULL);
+		return;
+	}
+
+	donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node);
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+	if(donee_node->donor_info) {
+		donor = donee_node->donor_info->task;
+	}
+
+	TRACE_CUR("%s+-> %s/%d (d: %s/%d) (fq: %d)\n",
+		padding,
+		donee_node->task->comm,
+		donee_node->task->pid,
+		(donor) ? donor->comm : "nil",
+		(donor) ? donor->pid : -1,
+		ikglp_get_idx(sem, donee_node->fq));
+
+	if(n->left) print_donees(sem, n->left, depth+1);
+	if(n->right) print_donees(sem, n->right, depth+1);
+}
+
+
+static void ikglp_add_donees(struct ikglp_semaphore *sem, struct fifo_queue *fq, struct task_struct *t, ikglp_donee_heap_node_t* node)
+{
+	TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid);
+	TRACE_CUR("donees Before:\n");
+	print_donees(sem, sem->donees.root, 1);
+
+	node->task = t;
+	node->donor_info = NULL;
+	node->fq = fq;
+	INIT_BINHEAP_NODE(&node->node);
+
+	binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node);
+
+	TRACE_CUR("donees After:\n");
+	print_donees(sem, sem->donees.root, 1);
+}
+
+
+static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags)
+{
+	// hp_waiter has increased
+	if (edf_higher_prio(t, fq->hp_waiter)) {
+		struct task_struct *old_max_eff_prio;
+		struct task_struct *new_max_eff_prio;
+		struct task_struct *new_prio = NULL;
+		struct task_struct *owner = fq->owner;
+
+		if(fq->hp_waiter)
+			TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid);
+		else
+			TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
+
+		if(owner)
+		{
+			raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+
+			//TRACE_TASK(owner, "Heap Before:\n");
+			//print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0);
+
+			old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
+
+			fq->hp_waiter = t;
+			fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
+
+			binheap_decrease(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
+
+			//TRACE_TASK(owner, "Heap After:\n");
+			//print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0);
+
+			new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
+
+			if(new_max_eff_prio != old_max_eff_prio) {
+				TRACE_TASK(t, "is new hp_waiter.\n");
+
+				if ((effective_priority(owner) == old_max_eff_prio) ||
+					(__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))){
+					new_prio = new_max_eff_prio;
+				}
+			}
+			else {
+				TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
+			}
+
+			if(new_prio) {
+				// set new inheritance and propagate
+				TRACE_TASK(t, "Effective priority changed for owner %s/%d to %s/%d\n",
+					owner->comm, owner->pid,
+					new_prio->comm, new_prio->pid);
+				nested_increase_priority_inheritance(owner, new_prio, &sem->lock, flags);  // unlocks lock.
+			}
+			else {
+				TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n",
+					new_max_eff_prio->comm, new_max_eff_prio->pid);
+				raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+				raw_spin_unlock_irqrestore(&sem->lock, flags);
+			}
+		}
+		else {
+			fq->hp_waiter = t;
+			fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
+
+			TRACE_TASK(t, "no owner??\n");
+			raw_spin_unlock_irqrestore(&sem->lock, flags);
+		}
+	}
+	else {
+		TRACE_TASK(t, "no change in hp_waiter.\n");
+		raw_spin_unlock_irqrestore(&sem->lock, flags);
+	}
+}
+
+// hp_waiter has decreased
+static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags)
+{
+	struct task_struct *owner = fq->owner;
+
+	struct task_struct *old_max_eff_prio;
+	struct task_struct *new_max_eff_prio;
+
+	if(!owner) {
+		raw_spin_unlock_irqrestore(&sem->lock, flags);
+		return;
+	}
+
+	raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 
+	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
+
+	binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
+	fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL;
+	binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
+
+	new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
+
+	if((old_max_eff_prio != new_max_eff_prio) &&
+		(effective_priority(owner) == old_max_eff_prio))
+	{
+		// Need to set new effective_priority for owner
+		struct task_struct *decreased_prio;
+
+		TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", sem->litmus_lock.ident);
+
+		if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
+			TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", sem->litmus_lock.ident);
+			decreased_prio = new_max_eff_prio;
+		}
+		else {
+			TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", sem->litmus_lock.ident);
+			decreased_prio = NULL;
+		}
+
+		// beware: recursion
+		nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags);  // will unlock sem->lock
+	}
+	else {
+		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+		raw_spin_unlock_irqrestore(&sem->lock, flags);
+	}
+}
+
+
+static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags)
+{
+	struct task_struct *owner = fq->owner;
+
+	struct task_struct *old_max_eff_prio;
+	struct task_struct *new_max_eff_prio;
+
+	BUG_ON(!owner);
+
+	raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+
+	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
+
+	binheap_delete(n, &tsk_rt(owner)->hp_blocked_tasks);
+
+	new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
+
+	if((old_max_eff_prio != new_max_eff_prio) &&
+		(effective_priority(owner) == old_max_eff_prio))
+	{
+		// Need to set new effective_priority for owner
+		struct task_struct *decreased_prio;
+
+		TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", sem->litmus_lock.ident);
+
+		if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
+			TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", sem->litmus_lock.ident);
+			decreased_prio = new_max_eff_prio;
+		}
+		else {
+			TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", sem->litmus_lock.ident);
+			decreased_prio = NULL;
+		}
+
+		// beware: recursion
+		nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags);  // will unlock sem->lock
+	}
+	else {
+		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+		raw_spin_unlock_irqrestore(&sem->lock, flags);
+	}
+}
+
+static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, struct binheap_node *n)
+{
+	struct task_struct *old_max_eff_prio;
+	struct task_struct *new_max_eff_prio;
+
+	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
+
+	old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
+
+	binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks);
+
+	new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
+
+	if((old_max_eff_prio != new_max_eff_prio) &&
+		(effective_priority(t) == old_max_eff_prio))
+	{
+		// Need to set new effective_priority for t (the waiter)
+		struct task_struct *decreased_prio;
+
+		if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) {
+			decreased_prio = new_max_eff_prio;
+		}
+		else {
+			decreased_prio = NULL;
+		}
+
+		tsk_rt(t)->inh_task = decreased_prio;
+	}
+
+	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
+}
+
+static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags)
+{
+	// resource available now
+	TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq));
+
+	fq->owner = t;
+
+	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
+	binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
+	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
+
+	++(fq->count);
+
+	ikglp_add_global_list(sem, t, &fq->global_heap_node);
+	ikglp_add_donees(sem, fq, t, &fq->donee_heap_node);
+
+	sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue);
+
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
+}
+
+
+static void __ikglp_enqueue_on_fq(
+	struct ikglp_semaphore *sem,
+	struct fifo_queue* fq,
+	struct task_struct* t,
+	wait_queue_t *wait,
+	ikglp_heap_node_t *global_heap_node,
+	ikglp_donee_heap_node_t *donee_heap_node)
+{
+	/* resource is not free => must suspend and wait */
+	TRACE_TASK(t, "Enqueuing on fq %d.\n",
+		ikglp_get_idx(sem, fq));
+
+	init_waitqueue_entry(wait, t);
+
+	__add_wait_queue_tail_exclusive(&fq->wait, wait);
+
+	++(fq->count);
+
+	// update global list.
+	if(likely(global_heap_node)) {
+		ikglp_add_global_list(sem, t, global_heap_node);
+	}
+	// update donor eligibility list.
+	if(likely(donee_heap_node)) {
+		ikglp_add_donees(sem, fq, t, donee_heap_node);
+	}
+
+	if(likely(sem->shortest_fifo_queue == fq)) {
+		sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq);
+	}
+
+	TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, sem->shortest_fifo_queue));
+}
+
+
+static void ikglp_enqueue_on_fq(
+	struct ikglp_semaphore *sem,
+	struct fifo_queue *fq,
+	ikglp_wait_state_t *wait,
+	unsigned long flags)
+{
+	/* resource is not free => must suspend and wait */
+	TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n",
+		ikglp_get_idx(sem, fq));
+
+	__ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, &wait->global_heap_node, &wait->donee_heap_node);
+
+	ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags);  // unlocks sem->lock
+}
+
+
+static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait)
+{
+	TRACE_TASK(wait->task, "goes to PQ.\n");
+
+	wait->pq_node.task = wait->task;  // the PQ heap comparator reads this field
+	INIT_BINHEAP_NODE(&wait->pq_node.node);
+
+	binheap_add(&wait->pq_node.node, &sem->priority_queue, ikglp_heap_node_t, node);
+}
+
+
+void print_donors(struct binheap_node *n, int depth)
+{
+	ikglp_wait_state_t *donor_node;
+	char padding[81] = " ";
+
+	if(n == NULL) {
+		TRACE_CUR("+-> %p\n", NULL);
+		return;
+	}
+
+	donor_node = binheap_entry(n, ikglp_wait_state_t, node);
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+	TRACE_CUR("%s+-> %s/%d (donee: %s/%d)\n",
+		padding,
+		donor_node->task->comm,
+		donor_node->task->pid,
+		donor_node->donee_info->task->comm,
+		donor_node->donee_info->task->pid);
+
+	if(n->left) print_donors(n->left, depth+1);
+	if(n->right) print_donors(n->right, depth+1);
+}
+
+
+static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state_t* wait, unsigned long flags)
+{
+	struct task_struct *t = wait->task;
+	ikglp_donee_heap_node_t *donee_node = NULL;
+	struct task_struct *donee;
+
+	struct task_struct *old_max_eff_prio;
+	struct task_struct *new_max_eff_prio;
+	struct task_struct *new_prio = NULL;
+
+	TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid);
+	TRACE_CUR("donors Before:\n");
+	print_donors(sem->donors.root, 1);
+
+	// Add donor to the global list.
+	ikglp_add_global_list(sem, t, &wait->global_heap_node);
+
+	INIT_BINHEAP_NODE(&wait->node);
+
+	// Select a donee
+	donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
+	donee = donee_node->task;
+
+	TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid);
+
+	TRACE_CUR("Temporarily removing %s/%d from donee list.\n", donee->comm, donee->pid);
+	TRACE_CUR("donees Before:\n");
+	print_donees(sem, sem->donees.root, 1);
+
+	binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node);  // will re-add it shortly
+
+	TRACE_CUR("donees After:\n");
+	print_donees(sem, sem->donees.root, 1);
+
+	wait->donee_info = donee_node;
+
+	// Add t to donor heap.
+	binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node);
+
+	// Now adjust the donee's priority.
+
+	// Lock the donee's inheritance heap.
+	raw_spin_lock(&tsk_rt(donee)->hp_blocked_tasks_lock);
+
+	old_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks);
+
+	if(donee_node->donor_info) {
+		// Steal donation relation.  Evict old donor to PQ.
+
+		// Remove old donor from donor heap
+		ikglp_wait_state_t *old_wait = donee_node->donor_info;
+		struct task_struct *old_donor = old_wait->task;
+
+		TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d.  Moving old donor to PQ.\n",
+			donee->comm, donee->pid, old_donor->comm, old_donor->pid);
+
+		binheap_delete(&old_wait->node, &sem->donors);
+
+		// Remove donation from donee's inheritance heap.
+		binheap_delete(&old_wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks);
+		// WARNING: have not updated inh_prio!
+
+		// Add old donor to PQ.
+		__ikglp_enqueue_on_pq(sem, old_wait);
+
+		// Remove old donor from the global heap.
+		ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node);
+	}
+
+	// Add back donee's node to the donees heap with increased prio
+	donee_node->donor_info = wait;
+	INIT_BINHEAP_NODE(&donee_node->node);
+
+	TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid);
+	TRACE_CUR("donees Before:\n");
+	print_donees(sem, sem->donees.root, 1);
+
+	binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node);
+
+	TRACE_CUR("donees After:\n");
+	print_donees(sem, sem->donees.root, 1);
+
+	// Add an inheritance/donation to the donee's inheritance heap.
+	wait->prio_donation.lock = (struct litmus_lock*)sem;
+	wait->prio_donation.hp_waiter_eff_prio = t;
+	wait->prio_donation.hp_waiter_ptr = NULL;
+	INIT_BINHEAP_NODE(&wait->prio_donation.hp_binheap_node);
+
+	binheap_add(&wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
+
+	new_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks);
+
+	if(new_max_eff_prio != old_max_eff_prio) {
+		if ((effective_priority(donee) == old_max_eff_prio) ||
+			(__edf_higher_prio(new_max_eff_prio, BASE, donee, INHERITED))){
+			TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", donee->comm, donee->pid);
+			new_prio = new_max_eff_prio;
+		}
+		else {
+			// should be a bug.  donor would not be in top-m.
+			TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid);
+			WARN_ON(1);
+		}
+	}
+	else {
+		// should be a bug.  donor would not be in top-m.
+		TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid);
+		WARN_ON(1);
+	}
+
+	if(new_prio) {
+		struct fifo_queue *donee_fq = donee_node->fq;
+
+		if(donee != donee_fq->owner) {
+			TRACE_TASK(t, "%s/%d is not the owner.  Propagating priority to owner %s/%d.\n",
+				donee->comm, donee->pid,
+				donee_fq->owner->comm, donee_fq->owner->pid);
+
+			raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock);
+			ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags);  // unlocks sem->lock
+		}
+		else {
+			TRACE_TASK(t, "%s/%d is the owner.  Propagating priority immediately.\n",
+				donee->comm, donee->pid);
+			nested_increase_priority_inheritance(donee, new_prio, &sem->lock, flags);  // unlocks sem->lock and donee's heap lock
+		}
+	}
+	else {
+		TRACE_TASK(t, "No change in effective priority (it is %s/%d).  BUG?\n",
+			new_max_eff_prio->comm, new_max_eff_prio->pid);
+		raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock);
+		raw_spin_unlock_irqrestore(&sem->lock, flags);
+	}
+
+	TRACE_CUR("donors After:\n");
+	print_donors(sem->donors.root, 1);
+}
 
 
 static int gsnedf_ikglp_lock(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+	unsigned long flags, real_flags;
+	struct fifo_queue *fq = NULL;
+
+	ikglp_wait_state_t wait;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	raw_spin_lock_irqsave(&sem->real_lock, real_flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
+
+	if(sem->shortest_fifo_queue->count == 0) {
+		// take available resource
+		ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags);  // unlocks sem->lock
+		raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
+		return 0;
+	}
+
+	// we have to suspend.
+
+	wait.task = t;  // THIS IS CRITICALLY IMPORTANT!!!
+
+	tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem;  // record where we are blocked
+	mb();
+
+	/* FIXME: interruptible would be nice some day */
+	set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+	if(sem->shortest_fifo_queue->count < sem->max_fifo_len) {
+		// enqueue on fq
+		ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags);  // unlocks sem->lock
+	}
+	else {
+		// no room in fifos.  Go to PQ or donors.
+
+		if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) {
+			// enqueue on PQ
+			__ikglp_enqueue_on_pq(sem, &wait);
+			raw_spin_unlock_irqrestore(&sem->lock, flags);
+		}
+		else {
+			// enqueue as donor
+			ikglp_enqueue_on_donor(sem, &wait, flags);  // unlocks sem->lock
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
+
+	TS_LOCK_SUSPEND;
+
+	schedule();
+
+	TS_LOCK_RESUME;
+
+	fq = ikglp_get_queue(sem, t);
+	BUG_ON(!fq);
+
+	TRACE_CUR("Acquired lock %d, queue %d\n",
+		l->ident, ikglp_get_idx(sem, fq));
+
 	return 0;
 }
 
+static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *donor_info)
+{
+	struct task_struct *t = donor_info->task;
+
+	TRACE_CUR("Donor %s/%d being moved to fq %d\n",
+		t->comm,
+		t->pid,
+		ikglp_get_idx(sem, fq));
+
+	binheap_delete(&donor_info->node, &sem->donors);
+
+	__ikglp_enqueue_on_fq(sem, fq, t,
+		&donor_info->fq_node,
+		&donor_info->global_heap_node,
+		&donor_info->donee_heap_node);
+
+	// warning:
+	// ikglp_update_owners_prio(t, fq, sem, flags) has not been called.
+}
+
+static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *wait)
+{
+	struct task_struct *t = wait->task;
+
+	TRACE_CUR("PQ request %s/%d being moved to fq %d\n",
+		t->comm,
+		t->pid,
+		ikglp_get_idx(sem, fq));
+
+	binheap_delete(&wait->pq_node.node, &sem->priority_queue);
+
+	__ikglp_enqueue_on_fq(sem, fq, t,
+		&wait->fq_node,
+		&wait->global_heap_node,
+		&wait->donee_heap_node);
+	// warning:
+	// ikglp_update_owners_prio(t, fq, sem, flags) has not been called.
+}
+
+static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(struct ikglp_semaphore* sem)
+{
+	/* must hold sem->lock */
+
+	struct fifo_queue *fq = NULL;
+	struct task_struct *max_hp = NULL;
+
+	struct list_head *pos;
+	struct task_struct *queued;
+	int i;
+
+	for(i = 0; i < sem->nr_replicas; ++i)
+	{
+		if( (sem->fifo_queues[i].count > 1) &&
+			((fq == NULL) ||
+			 (edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter))) )
+		{
+			fq = &sem->fifo_queues[i];
+		}
+	}
+
+	if(fq) {
+		max_hp = fq->hp_waiter;  // hp_waiter of the chosen queue is the steal target
+
+		list_for_each(pos, &fq->wait.task_list)
+		{
+			wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list);
+
+			queued = (struct task_struct*) wait->private;
+			/* find the wait state belonging to the hp waiter */
+			if (queued == max_hp)
+			{
+				return container_of(wait, ikglp_wait_state_t, fq_node);
+			}
+		}
+		BUG();
+	}
+
+	return(NULL);
+}
+
+static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *fq_wait)
+{
+	struct task_struct *t = fq_wait->task;
+	struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq;
+
+	WARN_ON(t != fq_steal->hp_waiter);
+
+	TRACE_CUR("FQ request %s/%d being moved to fq %d\n",
+		t->comm,
+		t->pid,
+		ikglp_get_idx(sem, fq));
+
+	fq_wait->donee_heap_node.fq = fq;  // just to be safe
+
+	__remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node);
+	--fq_steal->count;
+
+	// Update shortest.
+	if(fq_steal->count < sem->shortest_fifo_queue->count) {
+		sem->shortest_fifo_queue = fq_steal;
+	}
+
+	__ikglp_enqueue_on_fq(sem, fq, t,
+		&fq_wait->fq_node,
+		NULL,
+		NULL);
+
+	// warning: We have not checked the priority inheritance of fq's owner yet.
+}
+
+
+static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *old_wait)
+{
+	struct task_struct *t = old_wait->task;
+
+	BUG_ON(old_wait->donee_heap_node.fq != fq);
+
+	// need to migrate global_heap_node and donee_heap_node off of the stack
+	// to the nodes allocated for the owner of this fq.
+
+	// TODO: Enhance binheap() to perform this operation in place.
+
+	ikglp_del_global_list(sem, t, &old_wait->global_heap_node);  // remove
+	fq->global_heap_node = old_wait->global_heap_node;  // copy
+	ikglp_add_global_list(sem, t, &fq->global_heap_node);  // re-add
+
+	binheap_delete(&old_wait->donee_heap_node.node, &sem->donees);  // remove
+	fq->donee_heap_node = old_wait->donee_heap_node;  // copy
+
+	if(fq->donee_heap_node.donor_info) {
+		// let donor know that our location has changed
+		BUG_ON(fq->donee_heap_node.donor_info->donee_info->task != t);  // validate cross-link
+		fq->donee_heap_node.donor_info->donee_info = &fq->donee_heap_node;
+	}
+	INIT_BINHEAP_NODE(&fq->donee_heap_node.node);
+	binheap_add(&fq->donee_heap_node.node, &sem->donees, ikglp_donee_heap_node_t, node);  // re-add
+}
2520
1619static int gsnedf_ikglp_unlock(struct litmus_lock* l) 2521static int gsnedf_ikglp_unlock(struct litmus_lock* l)
1620{ 2522{
1621 struct ikglp_semaphore *sem = ikglp_from_lock(l); 2523 struct ikglp_semaphore *sem = ikglp_from_lock(l);
1622 return 0; 2524 struct task_struct *t = current;
2525 struct task_struct *donee = NULL;
2526 struct task_struct *next = NULL;
2527 ikglp_wait_state_t *other_donor_info = NULL;
2528 struct fifo_queue *to_steal = NULL;
2529 struct fifo_queue *fq;
2530
2531 unsigned long flags, real_flags;
2532
2533 int err = 0;
2534
2535 raw_spin_lock_irqsave(&sem->real_lock, real_flags);
2536 raw_spin_lock_irqsave(&sem->lock, flags);
2537
2538 fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner.
2539
2540 if (!fq) {
2541 err = -EINVAL;
2542 goto out;
2543 }
2544
2545 TRACE_TASK(t, "Freeing replica %d.\n", ikglp_to_idx(sem, fq));
2546
2547 if(fq->donee_heap_node.donor_info) { // move my doner to FQ
2548 ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info;
2549
2550 TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n",
2551 donor_info->task->comm, donor_info->task->pid,
2552 ikglp_to_idx(sem, fq));
2553 // donor moved to FQ
2554 donee = t;
2555 ikglp_move_donor_to_fq(sem, fq, donor_info);
2556 // TODO: Terminate donation
2557 }
2558 else if(!binheap_empty(&sem->donors)) { // No donor, so move any donor to FQ
2559 // move other donor to FQ
2560 other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
2561
2562 donee = other_donor_info->donee_info->task;
2563
2564 TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n",
2565 other_donor_info->task->comm, other_donor_info->task->pid,
2566 ikglp_to_idx(sem, fq));
2567
2568 ikglp_move_donor_to_fq(sem, fq, other_donor_info);
2569
2570 // TODO: Terminate donation
2571 }
2572 else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ
2573 ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, ikglp_heap_node_t, node);
2574 ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, pq_node);
2575
2576 TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n",
2577 pq_wait->task->comm, pq_wait->task->pid,
2578 ikglp_to_idx(sem, fq));
2579
2580 ikglp_move_pq_to_fq(sem, fq, pq_wait);
2581 }
2582 else if(fq->count == 1) { // No PQ and this queue is empty, so steal
2583 // steal.
2584 ikglp_wait_state_t *fq_wait;
2585
2586 TRACE_TASK(t, "Looking to steal a request for fq %d...\n",
2587 ikglp_to_idx(sem, fq));
2588
2589 fq_wait = ikglp_find_hp_waiter_to_steal(sem);
2590 if(fq_wait) {
2591 to_steal = fq_wait->donee_heap_node.fq;
2592
2593 TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n",
2594 fq_wait->task->comm, fq_wait->task->pid,
2595 ikglp_to_idx(sem, to_steal)
2596 ikglp_to_idx(sem, fq));
2597
2598 ikglp_steal_to_fq(sem, fq, fq_wait);
2599
2600 // TODO: Update prio of old queue.
2601 }
2602 else {
2603 TRACE_TASK(t, "Found nothing to steal for fq %d.\n",
2604 ikglp_to_idx(sem, fq));
2605 }
2606 }
2607 else { // move no one
2608 }
2609
2610 // 't' must drop all priority and clean up data structures before hand-off.
2611
2612 // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST
2613 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
2614 {
2615 int count = 0;
2616 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
2617 binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
2618 ++count;
2619 }
2620 decrease_priority_inheritance(t, NULL);
2621 WARN_ON(count > 2); // at most 2 entries: only the local fq inheritance and one donation are possible.
2622 }
2623 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
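/* Reading aid (not part of the patch): because IKGLP must be the
 * outermost lock, every entry in t's hp_blocked_tasks heap stems from
 * this semaphore -- at most the local fq inheritance plus one donation,
 * hence the WARN_ON(count > 2) above. Emptying the heap and calling
 * decrease_priority_inheritance(t, NULL) returns t to its base priority. */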
2624
2625 ikglp_del_global_list(sem, t, &fq->global_heap_node);
2626 binheap_delete(&fq->donee_heap_node.node, &sem->donees);
2627 fq->owner = NULL; // no longer owned!!
2628
2629 // Now patch up other priorities.
2630 //
2631 // At most one of the following:
2632 // if(donee && donee != t): decrease the donee's prio and propagate to its owner (and onward)
2633 // if(to_steal): update the victim queue owner's prio (hp_waiter has already been updated)
2634 //
2635
2636 BUG_ON((other_donor_info != NULL) && (to_steal != NULL));
2637
2638 if(other_donor_info) {
2639 struct fifo_queue *other_fq = other_donor_info->donee_info->fq;
2640
2641 BUG_ON(!donee);
2642 BUG_ON(donee == t);
2643
2644 TRACE_TASK(t, "Terminating donation relation of donor %s/%d to donee %s/%d!\n",
2645 other_donor_info->task->comm, other_donor_info->task->pid,
2646 donee->comm, donee->pid);
2647
2648 // need to terminate donation relation.
2649 if(donee == other_fq->owner) {
2650 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
2651 donee->comm, donee->pid,
2652 ikglp_to_idx(sem, other_fq));
2653
2654 ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags);
2655 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!!
2656 }
2657 else {
2658 TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n",
2659 donee->comm, donee->pid,
2660 ikglp_to_idx(sem, other_fq));
2661
2662 ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node);
2663 if(donee == other_fq->hp_waiter) {
2664 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
2665 donee->comm, donee->pid,
2666 ikglp_to_idx(sem, other_fq));
2667
2668 other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL);
2669 ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it.
2670 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!!
2671 }
2672 }
2673 }
2674 else if(to_steal) {
2675 TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n",
2676 ikglp_to_idx(sem, to_steal));
2677
2678 ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it.
2679 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!!
2680 }
2681
2682
2683 if(waitqueue_active(&fq->wait))
2684 {
2685 wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
2686 ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node);
2687
2688 next = __waitqueue_remove_first(&fq->wait);
2689
2690 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
2691 ikglp_to_idx(sem, fq),
2692 next->comm, next->pid);
2693
2694 ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
2695
2696 /* next becomes the resource holder */
2697 fq->owner = next;
2698 --fq->count;
2699 tsk_rt(next)->blocked_lock = NULL;
2700
2701 // TODO: UPDATE TO USE NEW HEAP NODES.
2702
2703 if(fq->count < sem->shortest_fifo_queue->count) {
2704 sem->shortest_fifo_queue = fq;
2705 }
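/* Reading aid (not part of the patch): fq just gave up a request, so it
 * may now be the globally shortest queue; sem->shortest_fifo_queue is
 * presumably where lock() enqueues new requests. The strict '<' means
 * ties leave the old pointer in place. */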
2706
2707
2708 /* determine new hp_waiter if necessary */
2709 if (next == fq->hp_waiter) {
2710
2711 TRACE_TASK(next, "was highest-prio waiter\n");
2712 /* next has the highest priority --- it doesn't need to
2713 * inherit. However, we need to make sure that the
2714 * next-highest priority in the queue is reflected in
2715 * hp_waiter. */
2716 fq->hp_waiter = ikglp_find_hp_waiter(fq, next);
2717 fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL;
2718
2719 if (fq->hp_waiter)
2720 TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n");
2721 else
2722 TRACE("no further waiters\n");
2723
2724 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
2725
2726 //TRACE_TASK(next, "Heap Before:\n");
2727 //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
2728
2729 binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node);
2730
2731 //TRACE_TASK(next, "Heap After:\n");
2732 //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
2733
2734 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
2735 }
2736 else {
2737 /* Well, if 'next' is not the highest-priority waiter,
2738 * then it (probably) ought to inherit the highest-priority
2739 * waiter's priority. */
2740 TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident);
2741
2742 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
2743
2744 binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks,
2745 struct nested_info, hp_binheap_node);
2746
2747 /* It is possible that 'next' *should* be the hp_waiter, but isn't
2748 * because that update hasn't yet executed (the update operation is
2749 * probably blocked on sem->lock). So only inherit if the top of
2750 * 'next's heap is indeed the effective prio. of hp_waiter.
2751 * (We use fq->nest.hp_waiter_eff_prio instead of effective_priority(hp_waiter)
2752 * since the effective priority of hp_waiter can change (and the
2753 * update has not made it to this queue yet).)
2754 */
2755 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == fq->nest.hp_waiter_eff_prio))
2756 {
2757 increase_priority_inheritance(next, fq->nest.hp_waiter_eff_prio);
2758 }
2759
2760 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
2761 }
2762 }
2763
2764
2765 /* wake up next (may be NULL if no waiter remained and nothing was moved in) */
2766 if(next) wake_up_process(next);
2767
2768out:
2769 raw_spin_unlock_irqrestore(&sem->lock, flags);
2770
2771 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2772
2773 return err;
1623} 2774}
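/* Locking discipline in the unlock path above (a reading aid, not part
 * of the patch):
 *
 *   raw_spin_lock_irqsave(&sem->real_lock, real_flags);     // outermost
 *     raw_spin_lock_irqsave(&sem->lock, flags);             // semaphore state
 *       raw_spin_lock(&tsk_rt(...)->hp_blocked_tasks_lock); // innermost, per task
 *
 * Helpers such as ikglp_refresh_owners_prio_decrease() and
 * ikglp_remove_donation_from_owner() return with sem->lock released, so
 * each call site immediately re-acquires it. */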
1624 2775
2776
1625static int gsnedf_ikglp_close(struct litmus_lock* l) 2777static int gsnedf_ikglp_close(struct litmus_lock* l)
1626{ 2778{
1627 struct task_struct *t = current; 2779 struct task_struct *t = current;
@@ -1653,8 +2805,6 @@ static void gsnedf_ikglp_free(struct litmus_lock* l)
1653 struct ikglp_semaphore *sem = ikglp_from_lock(l); 2805 struct ikglp_semaphore *sem = ikglp_from_lock(l);
1654 2806
1655 kfree(sem->fifo_queues); 2807 kfree(sem->fifo_queues);
1656 kfree(sem->fifos_holder_nodes);
1657 kfree(sem->global_holder_nodes);
1658 kfree(sem); 2808 kfree(sem);
1659} 2809}
1660 2810
@@ -1699,32 +2849,14 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
1699 return NULL; 2849 return NULL;
1700 } 2850 }
1701 2851
1702 sem->global_holder_nodes = kmalloc(sizeof(struct ikglp_heap_node)*nr_replicas, GFP_KERNEL);
1703 if(!sem->global_holder_nodes)
1704 {
1705 kfree(sem);
1706 *ret_code = -ENOMEM;
1707 return NULL;
1708 }
1709
1710 sem->fifos_holder_nodes = kmalloc(sizeof(struct ikglp_heap_node)*nr_replicas, GFP_KERNEL);
1711 if(!sem->fifos_holder_nodes)
1712 {
1713 kfree(sem->global_holder_nodes);
1714 kfree(sem);
1715 *ret_code = -ENOMEM;
1716 return NULL;
1717 }
1718
1719 sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL); 2852 sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL);
1720 if(!sem->fifo_queues) 2853 if(!sem->fifo_queues)
1721 { 2854 {
1722 kfree(sem->fifos_holder_nodes);
1723 kfree(sem->global_holder_nodes);
1724 kfree(sem); 2855 kfree(sem);
1725 *ret_code = -ENOMEM; 2856 *ret_code = -ENOMEM;
1726 return NULL; 2857 return NULL;
1727 } 2858 }
2859
1728 2860
1729 sem->litmus_lock.ops = &gsnedf_ikglp_lock_ops; 2861 sem->litmus_lock.ops = &gsnedf_ikglp_lock_ops;
1730 2862
@@ -1734,36 +2866,47 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
1734 } 2866 }
1735#else 2867#else
1736 raw_spin_lock_init(&sem->lock); 2868 raw_spin_lock_init(&sem->lock);
1737#endif 2869#endif
2870
2871 raw_spin_lock_init(&sem->real_lock);
1738 2872
1739 sem->nr_replicas = nr_replicas; 2873 sem->nr_replicas = nr_replicas;
1740 sem->nr_queued = 0; 2874 sem->m = num_online_cpus();
1741 2875 sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0);
1742 sem->max_fifo_len = (num_online_cpus()/nr_replicas) + ((num_online_cpus()%nr_replicas) != 0) - 1; // -1 to exclude holder
1743 sem->max_nr_queued = (sem->max_fifo_len * nr_replicas) + nr_replicas; // capacity of fifos + holders
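/* Reading aid (not part of the patch): the new expression is integer
 * ceiling division, max_fifo_len = ceil(m / nr_replicas). E.g., m = 4
 * CPUs and nr_replicas = 3 gives 4/3 + (4%3 != 0) = 1 + 1 = 2 slots per
 * queue. Unlike the removed computation, the replica holder is no longer
 * subtracted, which matches unlock() treating the owner as occupying a
 * slot (the "fq->count == 1" test). */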
1744 2876
1745 for(i = 0; i < nr_replicas; ++i) 2877 for(i = 0; i < nr_replicas; ++i)
1746 { 2878 {
1747 sem->fifo_queues[i].owner = NULL; 2879 struct fifo_queue* q = &(sem->fifo_queues[i]);
1748 sem->fifo_queues[i].hp_waiter = NULL; 2880
1749 init_waitqueue_head(&sem->fifo_queues[i].wait); 2881 q->owner = NULL;
1750 sem->fifo_queues[i].count = 0; 2882 q->hp_waiter = NULL;
2883 init_waitqueue_head(&q->wait);
2884 q->count = 0;
2885
2886 q->global_heap_node.task = NULL;
2887 INIT_BINHEAP_NODE(&q->global_heap_node.node);
2888
2889 q->donee_heap_node.task = NULL;
2890 q->donee_heap_node.donor_info = NULL;
2891 q->donee_heap_node.fq = NULL;
2892 INIT_BINHEAP_NODE(&q->donee_heap_node.node);
2893
2894 q->nest.lock = (struct litmus_lock*)sem;
2895 q->nest.hp_waiter_eff_prio = NULL;
2896 q->nest.hp_waiter_ptr = &q->hp_waiter;
2897 INIT_BINHEAP_NODE(&q->nest.hp_binheap_node);
1751 } 2898 }
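/* Reading aid (not part of the patch): each replica queue is wired into
 * the generic nested-locking machinery here: q->nest.lock points back at
 * the enclosing semaphore, and q->nest.hp_waiter_ptr aliases this queue's
 * hp_waiter field so shared propagation code can update it without
 * knowing about struct fifo_queue. */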
1752 2899
1753 sem->shortest_fifo_queue = &sem->fifo_queues[0]; 2900 sem->shortest_fifo_queue = &sem->fifo_queues[0];
1754 2901
2902 sem->top_m_size = 0;
1755 2903
1756 // init heaps 2904 // init heaps
1757 INIT_BINHEAP_HANDLE(&sem->global_heap, ikglp_edf_max_heap_base_priority_order); 2905 INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order);
1758 INIT_BINHEAP_HANDLE(&sem->fifos_heap, ikglp_edf_min_heap_order); 2906 INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order);
2907 INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order);
1759 INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); 2908 INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order);
1760 INIT_BINHEAP_HANDLE(&sem->doners, ikglp_doner_edf_max_heap_base_priority_order); 2909 INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order);
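/* Reading aid (not part of the patch), going by the comparator names:
 * top_m is a min-heap on base priority, so its root is the lowest-priority
 * member of the m highest-priority requests -- the first to be displaced;
 * not_top_m, priority_queue, and donors are max-heaps whose roots are the
 * next candidates to promote; donees is a min-heap, presumably so the
 * lowest-priority donee is found first when pairing up a new donor. */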
1761
1762 // init heap nodes used by holders
1763 for(i = 0; i < nr_replicas; ++i) {
1764 INIT_BINHEAP_NODE(&(sem->global_holder_nodes[i].node));
1765 INIT_BINHEAP_NODE(&(sem->fifos_holder_nodes[i].node));
1766 }
1767 2910
1768 *ret_code = 0; 2911 *ret_code = 0;
1769 return &sem->litmus_lock; 2912 return &sem->litmus_lock;