author		Glenn Elliott <gelliott@cs.unc.edu>	2012-04-09 00:18:24 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-04-09 00:18:24 -0400
commit		0c80d0acbbc2103a744f2b2b76cb66ddeb28ebbf
tree		d40fa8fcebfb8e5cf3015b0b582d922f62639257
parent		f5c9f29c1d17131870ec113cc357b40d2f087bc2
Fix IKGLP bugs discovered in testing.

Apply fixes to the IKGLP. Also, break binheap.h into binheap.h/.c.
-rw-r--r--	include/litmus/binheap.h	373
-rw-r--r--	include/litmus/edf_common.h	3
-rw-r--r--	litmus/Makefile	1
-rw-r--r--	litmus/binheap.c	387
-rw-r--r--	litmus/edf_common.c	37
-rw-r--r--	litmus/sched_gsn_edf.c	505
6 files changed, 766 insertions(+), 540 deletions(-)
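For orientation before the diff itself: the binheap this patch fixes is an intrusive binary heap, in the style of the existing Litmus bheap — users embed a struct binheap_node in their own type and supply a comparator. Below is a minimal usage sketch assembled from the macros that appear in this commit (binheap_add, binheap_top_entry, binheap_delete_root, binheap_delete, INIT_BINHEAP_NODE, binheap_entry, binheap_is_in_heap); the handle initializer name INIT_BINHEAP_HANDLE is an assumption, as it does not appear in these hunks.

	struct item {
		int key;
		struct binheap_node hn;
	};

	/* returns non-zero if 'a' should be ordered above 'b' (min-heap on key) */
	static int item_min_order(struct binheap_node *a, struct binheap_node *b)
	{
		struct item *ia = binheap_entry(a, struct item, hn);
		struct item *ib = binheap_entry(b, struct item, hn);
		return ia->key < ib->key;
	}

	static void binheap_demo(struct binheap_handle *heap, struct item *x, struct item *y)
	{
		struct item *top;

		INIT_BINHEAP_HANDLE(heap, item_min_order);	/* assumed initializer */
		INIT_BINHEAP_NODE(&x->hn);
		INIT_BINHEAP_NODE(&y->hn);

		binheap_add(&x->hn, heap, struct item, hn);
		binheap_add(&y->hn, heap, struct item, hn);

		top = binheap_top_entry(heap, struct item, hn);	/* smallest key */
		binheap_delete_root(heap, struct item, hn);	/* removes 'top' */

		if (binheap_is_in_heap(&x->hn))	/* x may already have left as root */
			binheap_delete(&x->hn, heap);	/* arbitrary delete */
	}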
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
index 87dbaee9f27c..9e966e3886cb 100644
--- a/include/litmus/binheap.h
+++ b/include/litmus/binheap.h
@@ -1,6 +1,8 @@
 #ifndef LITMUS_BINARY_HEAP_H
 #define LITMUS_BINARY_HEAP_H
 
+#include <linux/kernel.h>
+
 /**
  * Simple binary heap with add, arbitrary delete, delete_root, and top
  * operations.
@@ -128,7 +130,7 @@ __binheap_add((new_node), (handle), container_of((new_node), type, member))
 #define binheap_decrease(orig_node, handle) \
 __binheap_decrease((orig_node), (handle))
 
-#define BINHEAP_NODE_INIT() { NULL, NULL, NULL, NULL , NULL, NULL}
+#define BINHEAP_NODE_INIT() { NULL, BINHEAP_POISON, NULL, NULL , NULL, NULL}
 
 #define BINHEAP_NODE(name) \
 	struct binheap_node name = BINHEAP_NODE_INIT()
@@ -166,273 +168,14 @@ static inline int binheap_is_in_heap(struct binheap_node *node)
 	return (node->parent != BINHEAP_POISON);
 }
 
-static inline int binheap_is_in_this_heap(struct binheap_node *node, struct binheap_handle* heap)
-{
-	if(!binheap_is_in_heap(node)) {
-		return 0;
-	}
-
-	while(node->parent != NULL) {
-		node = node->parent;
-	}
-
-	return (node == heap->root);
-}
-
-/* Update the node reference pointers.  Same logic as Litmus binomial heap. */
-static inline void __update_ref(struct binheap_node *parent,
-				struct binheap_node *child)
-{
-	*(parent->ref_ptr) = child;
-	*(child->ref_ptr) = parent;
-
-	swap(parent->ref_ptr, child->ref_ptr);
-}
-
-/* Swaps data between two nodes. */
-static inline void __binheap_swap(struct binheap_node *parent,
-				struct binheap_node *child)
-{
-	swap(parent->data, child->data);
-	__update_ref(parent, child);
-}
-
-/* Swaps memory and data between two nodes. Actual nodes swap instead of
- * just data.  Needed when we delete nodes from the heap.
- */
-static inline void __binheap_swap_safe(struct binheap_handle *handle,
-				struct binheap_node *a,
-				struct binheap_node *b)
-{
-	swap(a->data, b->data);
-	__update_ref(a, b);
-
-	if((a->parent != NULL) && (a->parent == b->parent)) {
-		/* special case: shared parent */
-		swap(a->parent->left, a->parent->right);
-	}
-	else {
-		/* Update pointers to swap parents. */
-
-		if(a->parent) {
-			if(a == a->parent->left) {
-				a->parent->left = b;
-			}
-			else {
-				a->parent->right = b;
-			}
-		}
-
-		if(b->parent) {
-			if(b == b->parent->left) {
-				b->parent->left = a;
-			}
-			else {
-				b->parent->right = a;
-			}
-		}
-
-		swap(a->parent, b->parent);
-	}
-
-	/* swap children */
-
-	if(a->left) {
-		a->left->parent = b;
-
-		if(a->right) {
-			a->right->parent = b;
-		}
-	}
-
-	if(b->left) {
-		b->left->parent = a;
-
-		if(b->right) {
-			b->right->parent = a;
-		}
-	}
-
-	swap(a->left, b->left);
-	swap(a->right, b->right);
-
-
-	/* update next/last/root pointers */
-
-	if(a == handle->next) {
-		handle->next = b;
-	}
-	else if(b == handle->next) {
-		handle->next = a;
-	}
-
-	if(a == handle->last) {
-		handle->last = b;
-	}
-	else if(b == handle->last) {
-		handle->last = a;
-	}
-
-	if(a == handle->root) {
-		handle->root = b;
-	}
-	else if(b == handle->root) {
-		handle->root = a;
-	}
-}
-
 
-/**
- * Update the pointer to the last node in the complete binary tree.
- * Called internally after the root node has been deleted.
- */
-static inline void __binheap_update_last(struct binheap_handle *handle)
-{
-	struct binheap_node *temp = handle->last;
-
-	/* find a "bend" in the tree. */
-	while(temp->parent && (temp == temp->parent->left)) {
-		temp = temp->parent;
-	}
-
-	/* step over to sibling if we're not at root */
-	if(temp->parent != NULL) {
-		temp = temp->parent->left;
-	}
-
-	/* now travel right as far as possible. */
-	while(temp->right != NULL) {
-		temp = temp->right;
-	}
-
-	/* take one step to the left if we're not at the bottom-most level. */
-	if(temp->left != NULL) {
-		temp = temp->left;
-	}
-
-	//BUG_ON(!(temp->left == NULL && temp->right == NULL));
-
-	handle->last = temp;
-}
-
-/**
- * Update the pointer to the node that will take the next inserted node.
- * Called internally after a node has been inserted.
- */
-static inline void __binheap_update_next(struct binheap_handle *handle)
-{
-	struct binheap_node *temp = handle->next;
+int binheap_is_in_this_heap(struct binheap_node *node, struct binheap_handle* heap);
 
-	/* find a "bend" in the tree. */
-	while(temp->parent && (temp == temp->parent->right)) {
-		temp = temp->parent;
-	}
 
-	/* step over to sibling if we're not at root */
-	if(temp->parent != NULL) {
-		temp = temp->parent->right;
-	}
 
-	/* now travel left as far as possible. */
-	while(temp->left != NULL) {
-		temp = temp->left;
-	}
-
-	handle->next = temp;
-}
-
-
-
-/* bubble node up towards root */
-static inline void __binheap_bubble_up(
+void __binheap_add(struct binheap_node *new_node,
 	struct binheap_handle *handle,
-	struct binheap_node *node)
-{
-	/* Note: NULL data pointers are used internally for arbitrary delete */
-	while((node->parent != NULL) &&
-		  ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ ||
-		   handle->compare(node, node->parent))) {
-		__binheap_swap(node->parent, node);
-		node = node->parent;
-	}
-}
-
-
-/* bubble node down, swapping with min-child */
-static inline void __binheap_bubble_down(struct binheap_handle *handle)
-{
-	struct binheap_node *node = handle->root;
-
-	while(node->left != NULL) {
-		if(node->right && handle->compare(node->right, node->left)) {
-			if(handle->compare(node->right, node)) {
-				__binheap_swap(node, node->right);
-				node = node->right;
-			}
-			else {
-				break;
-			}
-		}
-		else {
-			if(handle->compare(node->left, node)) {
-				__binheap_swap(node, node->left);
-				node = node->left;
-			}
-			else {
-				break;
-			}
-		}
-	}
-}
-
-
-
-static inline void __binheap_add(struct binheap_node *new_node,
-			struct binheap_handle *handle,
-			void *data)
-{
-	new_node->data = data;
-	new_node->ref = new_node;
-	new_node->ref_ptr = &(new_node->ref);
-
-	if(!binheap_empty(handle)) {
-		/* insert left side first */
-		if(handle->next->left == NULL) {
-			handle->next->left = new_node;
-			new_node->parent = handle->next;
-			new_node->left = NULL;
-			new_node->right = NULL;
-
-			handle->last = new_node;
-
-			__binheap_bubble_up(handle, new_node);
-		}
-		else {
-			/* left occupied. insert right. */
-			handle->next->right = new_node;
-			new_node->parent = handle->next;
-			new_node->left = NULL;
-			new_node->right = NULL;
-
-			handle->last = new_node;
-
-			__binheap_update_next(handle);
-			__binheap_bubble_up(handle, new_node);
-		}
-	}
-	else {
-		/* first node in heap */
-
-		new_node->parent = NULL;
-		new_node->left = NULL;
-		new_node->right = NULL;
-
-		handle->root = new_node;
-		handle->next = new_node;
-		handle->last = new_node;
-	}
-}
-
+	void *data);
 
 
 /**
@@ -442,111 +185,23 @@ static inline void __binheap_add(struct binheap_node *new_node,
  * The 'last' node in the tree is then swapped up to the root and bubbled
  * down.
  */
-static inline void __binheap_delete_root(struct binheap_handle *handle,
-						struct binheap_node *container)
-{
-	struct binheap_node *root = handle->root;
-
-	if(root != container) {
-		/* coalesce */
-		__binheap_swap_safe(handle, root, container);
-		root = container;
-	}
-
-	if(handle->last != root) {
-		/* swap 'last' node up to root and bubble it down. */
-
-		struct binheap_node *to_move = handle->last;
-
-		if(to_move->parent != root) {
-			handle->next = to_move->parent;
-
-			if(handle->next->right == to_move) {
-				/* disconnect from parent */
-				to_move->parent->right = NULL;
-				handle->last = handle->next->left;
-			}
-			else {
-				/* find new 'last' before we disconnect */
-				__binheap_update_last(handle);
-
-				/* disconnect from parent */
-				to_move->parent->left = NULL;
-			}
-		}
-		else {
-			/* 'last' is direct child of root */
-
-			handle->next = to_move;
-
-			if(to_move == to_move->parent->right) {
-				to_move->parent->right = NULL;
-				handle->last = to_move->parent->left;
-			}
-			else {
-				to_move->parent->left = NULL;
-				handle->last = to_move;
-			}
-		}
-		to_move->parent = NULL;
-
-		/* reconnect as root.  We can't just swap data ptrs since root node
-		 * may be freed after this function returns.
-		 */
-		to_move->left = root->left;
-		to_move->right = root->right;
-		if(to_move->left != NULL) {
-			to_move->left->parent = to_move;
-		}
-		if(to_move->right != NULL) {
-			to_move->right->parent = to_move;
-		}
-
-		handle->root = to_move;
-
-		/* bubble down */
-		__binheap_bubble_down(handle);
-	}
-	else {
-		/* removing last node in tree */
-		handle->root = NULL;
-		handle->next = NULL;
-		handle->last = NULL;
-	}
-}
-
+void __binheap_delete_root(struct binheap_handle *handle,
+	struct binheap_node *container);
 
 /**
  * Delete an arbitrary node.  Bubble node to delete up to the root,
  * and then delete to root.
  */
-static inline void __binheap_delete(
+void __binheap_delete(
 	struct binheap_node *node_to_delete,
-	struct binheap_handle *handle)
-{
-	struct binheap_node *target = node_to_delete->ref;
-	void *temp_data = target->data;
-
-	/* temporarily set data to null to allow node to bubble up to the top. */
-	target->data = BINHEAP_POISON;
-
-	__binheap_bubble_up(handle, target);
-	__binheap_delete_root(handle, node_to_delete);
-
-	node_to_delete->data = temp_data;  /* restore node data pointer */
-	node_to_delete->parent = BINHEAP_POISON;  /* poison the node */
-}
+	struct binheap_handle *handle);
 
 /**
  * Bubble up a node whose pointer has decreased in value.
  */
-static inline void __binheap_decrease(
-	struct binheap_node *orig_node,
-	struct binheap_handle *handle)
-{
-	struct binheap_node *target = orig_node->ref;
-	__binheap_bubble_up(handle, target);
-}
+void __binheap_decrease(struct binheap_node *orig_node,
+	struct binheap_handle *handle);
+
 
 #endif
 
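The BINHEAP_NODE_INIT() change above is one of the actual bug fixes rather than refactoring: binheap_is_in_heap() tests only node->parent != BINHEAP_POISON, and a legitimate heap root has a NULL parent, so a NULL-initialized node used to be indistinguishable from an in-heap root. A sketch of the failure the new initializer prevents (it assumes, per the membership test above, that the second initializer slot is the parent pointer):

	struct binheap_node n = BINHEAP_NODE_INIT();

	/* Old initializer: n.parent == NULL, so binheap_is_in_heap(&n) wrongly
	 * reported a never-inserted node as queued (NULL != BINHEAP_POISON).
	 * New initializer: n.parent == BINHEAP_POISON, making the test reliable. */
	WARN_ON(binheap_is_in_heap(&n));	/* no longer triggers for a fresh node */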
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index aacfab14a254..818f4094b53c 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -30,11 +30,12 @@ int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node
 typedef enum
 {
 	BASE,
-	INHERITED
+	EFFECTIVE
 } comparison_mode_t;
 
 int __edf_higher_prio(struct task_struct* first, comparison_mode_t first_mode,
 		      struct task_struct* second, comparison_mode_t second_mode);
+
 #endif
 
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
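The INHERITED -> EFFECTIVE rename changes terminology only: in EFFECTIVE mode the comparison first substitutes rt_param.inh_task (the priority the task currently executes with), while BASE compares the task's own parameters. An illustrative sketch of how callers in this patch mix the two modes ('owner' and 'waiter' are hypothetical names):

	/* does the waiter's own deadline beat everything the lock owner
	 * currently runs with?  If so, inheritance must be (re)propagated. */
	if (__edf_higher_prio(waiter, BASE, owner, EFFECTIVE)) {
		/* propagate waiter's priority to owner */
	}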
diff --git a/litmus/Makefile b/litmus/Makefile
index 7338180f196f..b05f7982d823 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -15,6 +15,7 @@ obj-y = sched_plugin.o litmus.o \
 	   locking.o \
 	   srp.o \
 	   bheap.o \
+	   binheap.o \
 	   ctrldev.o \
 	   sched_gsn_edf.o \
 	   sched_psn_edf.o
diff --git a/litmus/binheap.c b/litmus/binheap.c
new file mode 100644
index 000000000000..f76260e64b0b
--- /dev/null
+++ b/litmus/binheap.c
@@ -0,0 +1,387 @@
+#include <litmus/binheap.h>
+
+
+int binheap_is_in_this_heap(struct binheap_node *node,
+	struct binheap_handle* heap)
+{
+	if(!binheap_is_in_heap(node)) {
+		return 0;
+	}
+
+	while(node->parent != NULL) {
+		node = node->parent;
+	}
+
+	return (node == heap->root);
+}
+
+/* Update the node reference pointers.  Same logic as Litmus binomial heap. */
+static void __update_ref(struct binheap_node *parent,
+	struct binheap_node *child)
+{
+	*(parent->ref_ptr) = child;
+	*(child->ref_ptr) = parent;
+
+	swap(parent->ref_ptr, child->ref_ptr);
+}
+
+/* Swaps data between two nodes. */
+static void __binheap_swap(struct binheap_node *parent,
+	struct binheap_node *child)
+{
+	swap(parent->data, child->data);
+	__update_ref(parent, child);
+}
+
+
+/* Swaps memory and data between two nodes. Actual nodes swap instead of
+ * just data.  Needed when we delete nodes from the heap.
+ */
+static void __binheap_swap_safe(struct binheap_handle *handle,
+	struct binheap_node *a,
+	struct binheap_node *b)
+{
+	swap(a->data, b->data);
+	__update_ref(a, b);
+
+	if((a->parent != NULL) && (a->parent == b->parent)) {
+		/* special case: shared parent */
+		swap(a->parent->left, a->parent->right);
+	}
+	else {
+		/* Update pointers to swap parents. */
+
+		if(a->parent) {
+			if(a == a->parent->left) {
+				a->parent->left = b;
+			}
+			else {
+				a->parent->right = b;
+			}
+		}
+
+		if(b->parent) {
+			if(b == b->parent->left) {
+				b->parent->left = a;
+			}
+			else {
+				b->parent->right = a;
+			}
+		}
+
+		swap(a->parent, b->parent);
+	}
+
+	/* swap children */
+
+	if(a->left) {
+		a->left->parent = b;
+
+		if(a->right) {
+			a->right->parent = b;
+		}
+	}
+
+	if(b->left) {
+		b->left->parent = a;
+
+		if(b->right) {
+			b->right->parent = a;
+		}
+	}
+
+	swap(a->left, b->left);
+	swap(a->right, b->right);
+
+
+	/* update next/last/root pointers */
+
+	if(a == handle->next) {
+		handle->next = b;
+	}
+	else if(b == handle->next) {
+		handle->next = a;
+	}
+
+	if(a == handle->last) {
+		handle->last = b;
+	}
+	else if(b == handle->last) {
+		handle->last = a;
+	}
+
+	if(a == handle->root) {
+		handle->root = b;
+	}
+	else if(b == handle->root) {
+		handle->root = a;
+	}
+}
+
+
+/**
+ * Update the pointer to the last node in the complete binary tree.
+ * Called internally after the root node has been deleted.
+ */
+static void __binheap_update_last(struct binheap_handle *handle)
+{
+	struct binheap_node *temp = handle->last;
+
+	/* find a "bend" in the tree. */
+	while(temp->parent && (temp == temp->parent->left)) {
+		temp = temp->parent;
+	}
+
+	/* step over to sibling if we're not at root */
+	if(temp->parent != NULL) {
+		temp = temp->parent->left;
+	}
+
+	/* now travel right as far as possible. */
+	while(temp->right != NULL) {
+		temp = temp->right;
+	}
+
+	/* take one step to the left if we're not at the bottom-most level. */
+	if(temp->left != NULL) {
+		temp = temp->left;
+	}
+
+	//BUG_ON(!(temp->left == NULL && temp->right == NULL));
+
+	handle->last = temp;
+}
+
+/**
+ * Update the pointer to the node that will take the next inserted node.
+ * Called internally after a node has been inserted.
+ */
+static void __binheap_update_next(struct binheap_handle *handle)
+{
+	struct binheap_node *temp = handle->next;
+
+	/* find a "bend" in the tree. */
+	while(temp->parent && (temp == temp->parent->right)) {
+		temp = temp->parent;
+	}
+
+	/* step over to sibling if we're not at root */
+	if(temp->parent != NULL) {
+		temp = temp->parent->right;
+	}
+
+	/* now travel left as far as possible. */
+	while(temp->left != NULL) {
+		temp = temp->left;
+	}
+
+	handle->next = temp;
+}
+
+
+
+/* bubble node up towards root */
+static void __binheap_bubble_up(
+	struct binheap_handle *handle,
+	struct binheap_node *node)
+{
+	/* Note: NULL data pointers are used internally for arbitrary delete */
+	while((node->parent != NULL) &&
+		  ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ ||
+		   handle->compare(node, node->parent))) {
+		__binheap_swap(node->parent, node);
+		node = node->parent;
+	}
+}
+
+
+/* bubble node down, swapping with min-child */
+static void __binheap_bubble_down(struct binheap_handle *handle)
+{
+	struct binheap_node *node = handle->root;
+
+	while(node->left != NULL) {
+		if(node->right && handle->compare(node->right, node->left)) {
+			if(handle->compare(node->right, node)) {
+				__binheap_swap(node, node->right);
+				node = node->right;
+			}
+			else {
+				break;
+			}
+		}
+		else {
+			if(handle->compare(node->left, node)) {
+				__binheap_swap(node, node->left);
+				node = node->left;
+			}
+			else {
+				break;
+			}
+		}
+	}
+}
+
+
+
+void __binheap_add(struct binheap_node *new_node,
+	struct binheap_handle *handle,
+	void *data)
+{
+	new_node->data = data;
+	new_node->ref = new_node;
+	new_node->ref_ptr = &(new_node->ref);
+
+	if(!binheap_empty(handle)) {
+		/* insert left side first */
+		if(handle->next->left == NULL) {
+			handle->next->left = new_node;
+			new_node->parent = handle->next;
+			new_node->left = NULL;
+			new_node->right = NULL;
+
+			handle->last = new_node;
+
+			__binheap_bubble_up(handle, new_node);
+		}
+		else {
+			/* left occupied. insert right. */
+			handle->next->right = new_node;
+			new_node->parent = handle->next;
+			new_node->left = NULL;
+			new_node->right = NULL;
+
+			handle->last = new_node;
+
+			__binheap_update_next(handle);
+			__binheap_bubble_up(handle, new_node);
+		}
+	}
+	else {
+		/* first node in heap */
+
+		new_node->parent = NULL;
+		new_node->left = NULL;
+		new_node->right = NULL;
+
+		handle->root = new_node;
+		handle->next = new_node;
+		handle->last = new_node;
+	}
+}
+
+
+
+/**
+ * Removes the root node from the heap. The node is removed after coalescing
+ * the binheap_node with its original data pointer at the root of the tree.
+ *
+ * The 'last' node in the tree is then swapped up to the root and bubbled
+ * down.
+ */
+void __binheap_delete_root(struct binheap_handle *handle,
+	struct binheap_node *container)
+{
+	struct binheap_node *root = handle->root;
+
+	if(root != container) {
+		/* coalesce */
+		__binheap_swap_safe(handle, root, container);
+		root = container;
+	}
+
+	if(handle->last != root) {
+		/* swap 'last' node up to root and bubble it down. */
+
+		struct binheap_node *to_move = handle->last;
+
+		if(to_move->parent != root) {
+			handle->next = to_move->parent;
+
+			if(handle->next->right == to_move) {
+				/* disconnect from parent */
+				to_move->parent->right = NULL;
+				handle->last = handle->next->left;
+			}
+			else {
+				/* find new 'last' before we disconnect */
+				__binheap_update_last(handle);
+
+				/* disconnect from parent */
+				to_move->parent->left = NULL;
+			}
+		}
+		else {
+			/* 'last' is direct child of root */
+
+			handle->next = to_move;
+
+			if(to_move == to_move->parent->right) {
+				to_move->parent->right = NULL;
+				handle->last = to_move->parent->left;
+			}
+			else {
+				to_move->parent->left = NULL;
+				handle->last = to_move;
+			}
+		}
+		to_move->parent = NULL;
+
+		/* reconnect as root.  We can't just swap data ptrs since root node
+		 * may be freed after this function returns.
+		 */
+		to_move->left = root->left;
+		to_move->right = root->right;
+		if(to_move->left != NULL) {
+			to_move->left->parent = to_move;
+		}
+		if(to_move->right != NULL) {
+			to_move->right->parent = to_move;
+		}
+
+		handle->root = to_move;
+
+		/* bubble down */
+		__binheap_bubble_down(handle);
+	}
+	else {
+		/* removing last node in tree */
+		handle->root = NULL;
+		handle->next = NULL;
+		handle->last = NULL;
+	}
+
+	/* mark as removed */
+	container->parent = BINHEAP_POISON;
+}
+
+
+/**
+ * Delete an arbitrary node.  Bubble node to delete up to the root,
+ * and then delete to root.
+ */
+void __binheap_delete(struct binheap_node *node_to_delete,
+	struct binheap_handle *handle)
+{
+	struct binheap_node *target = node_to_delete->ref;
+	void *temp_data = target->data;
+
+	/* temporarily set data to null to allow node to bubble up to the top. */
+	target->data = BINHEAP_POISON;
+
+	__binheap_bubble_up(handle, target);
+	__binheap_delete_root(handle, node_to_delete);
+
+	node_to_delete->data = temp_data;  /* restore node data pointer */
+	node_to_delete->parent = BINHEAP_POISON;  /* poison the node */
+}
+
+/**
+ * Bubble up a node whose pointer has decreased in value.
+ */
+void __binheap_decrease(struct binheap_node *orig_node,
+	struct binheap_handle *handle)
+{
+	struct binheap_node *target = orig_node->ref;
+	__binheap_bubble_up(handle, target);
+}
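__binheap_delete() above implements arbitrary delete by composing the other primitives: the target's data pointer is temporarily set to BINHEAP_POISON, which __binheap_bubble_up() treats as winning every comparison, so the node floats to the root and the ordinary delete-root path removes it; the parent pointer is then poisoned to mark the node as removed. A minimal caller-side sketch (the helper name remove_waiter is hypothetical):

	static void remove_waiter(struct binheap_handle *heap, struct binheap_node *n)
	{
		/* safe even for nodes never added: both the initializer and the
		 * delete paths poison n->parent, which is exactly what
		 * binheap_is_in_heap() tests */
		if (binheap_is_in_heap(n))
			__binheap_delete(n, heap);
	}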
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index f275610bf052..5ea6b1bc7f24 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -19,6 +19,7 @@
 #include <litmus/edf_common.h>
 
 
+
 /* edf_higher_prio -  returns true if first has a higher EDF priority
  * than second. Deadline ties are broken by PID.
  *
@@ -37,8 +38,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 	/* There is no point in comparing a task to itself. */
 	if (first && first == second) {
-		TRACE_TASK(first,
-			   "WARNING: pointless edf priority comparison.\n");
+		TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid);
+		WARN_ON(1);
 		return 0;
 	}
 
@@ -48,19 +49,19 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	return first && !second;
 
 #ifdef CONFIG_LITMUS_LOCKING
-	/* Check for inherited priorities. Change task
+	/* Check for EFFECTIVE priorities. Change task
 	 * used for comparison in such a case.
 	 */
 	if (unlikely(first->rt_param.inh_task)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
-		&& (first_mode == INHERITED)
+		&& (first_mode == EFFECTIVE)
 #endif
 		) {
 		first_task = first->rt_param.inh_task;
 	}
 	if (unlikely(second->rt_param.inh_task)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
-		&& (second_mode == INHERITED)
+		&& (second_mode == EFFECTIVE)
 #endif
 		) {
 		second_task = second->rt_param.inh_task;
@@ -82,6 +83,26 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 #endif
 
+//	// rate-monotonic for testing
+//	return !is_realtime(second_task) ||
+//
+//		/* is the deadline of the first task earlier?
+//		 * Then it has higher priority.
+//		 */
+//		shorter_period(first_task, second_task) ||
+//
+//		/* Do we have a deadline tie?
+//		 * Then break by PID.
+//		 */
+//		(get_period(first_task) == get_period(second_task) &&
+//		(first_task->pid < second_task->pid ||
+//
+//		/* If the PIDs are the same then the task with the EFFECTIVE
+//		 * priority wins.
+//		 */
+//		(first_task->pid == second_task->pid &&
+//		 !second->rt_param.inh_task)));
+
 	return !is_realtime(second_task) ||
 
 	/* is the deadline of the first task earlier?
@@ -95,7 +116,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	(get_deadline(first_task) == get_deadline(second_task) &&
 	(first_task->pid < second_task->pid ||
 
-	/* If the PIDs are the same then the task with the inherited
+	/* If the PIDs are the same then the task with the EFFECTIVE
 	 * priority wins.
 	 */
 	(first_task->pid == second_task->pid &&
@@ -106,7 +127,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 {
-	return __edf_higher_prio(first, INHERITED, second, INHERITED);
+	return __edf_higher_prio(first, EFFECTIVE, second, EFFECTIVE);
 }
 
 int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b)
@@ -114,7 +135,7 @@ int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b)
 	struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node);
 	struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node);
 
-	return __edf_higher_prio(l_a->hp_waiter_eff_prio, INHERITED, l_b->hp_waiter_eff_prio, INHERITED);
+	return __edf_higher_prio(l_a->hp_waiter_eff_prio, EFFECTIVE, l_b->hp_waiter_eff_prio, EFFECTIVE);
 }
 
 int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
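Restated outside the single large return expression, the comparison in edf_higher_prio() is a three-level tie-break. The sketch below mirrors the logic of the hunks above (an equivalent form for reading, not a replacement; accessors as used in the diff, applied after the EFFECTIVE-mode substitution of inh_task):

	static int edf_higher_prio_sketch(struct task_struct *a, struct task_struct *b,
					  struct task_struct *second_orig)
	{
		if (!is_realtime(b))
			return 1;				/* real-time beats best-effort */
		if (get_deadline(a) != get_deadline(b))
			return get_deadline(a) < get_deadline(b);	/* earlier deadline wins */
		if (a->pid != b->pid)
			return a->pid < b->pid;			/* deadline tie: lower PID wins */
		/* same PID: the copy carrying an EFFECTIVE (inherited) priority wins */
		return !second_orig->rt_param.inh_task;
	}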
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 0050d375ab6c..3d653bdca357 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -108,7 +108,6 @@ typedef struct {
 	int			cpu;
 	struct task_struct*	linked;		/* only RT tasks */
 	struct task_struct*	scheduled;	/* only RT tasks */
-	int			in_heap;	/* == 0/1: not in / in heap*/
 	struct binheap_node	hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
@@ -144,10 +143,10 @@ static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-	if (likely(entry->in_heap))
+	if (likely(binheap_is_in_heap(&entry->hn))) {
 		binheap_delete(&entry->hn, &gsnedf_cpu_heap);
+	}
 	binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn);
-	entry->in_heap = 1;
 }
 
 /* caller must hold gsnedf lock */
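The two hunks above remove the cpu_entry_t.in_heap flag entirely: with parent-pointer poisoning, the binheap_node itself records membership, so the flag was redundant state that could drift out of sync with the heap. The repositioning pattern the scheduler now follows, in isolation:

	/* sketch of update_cpu_position() after this change */
	if (binheap_is_in_heap(&entry->hn))		/* was: entry->in_heap */
		binheap_delete(&entry->hn, &gsnedf_cpu_heap);	/* unlink before re-keying */
	binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn);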
@@ -664,7 +663,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 	raw_spin_lock(&gsnedf_lock);
 
 	/* this sanity check allows for weaker locking in protocols */
-	if(__edf_higher_prio(prio_inh, BASE, t, INHERITED)) {
+	if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
 		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
 		tsk_rt(t)->inh_task = prio_inh;
 
@@ -737,11 +736,11 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str
 {
 	raw_spin_lock(&gsnedf_lock);
 
-	if(__edf_higher_prio(t, INHERITED, prio_inh, BASE)) {
+	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
 		/* A job only stops inheriting a priority when it releases a
 		 * resource. Thus we can make the following assumption.*/
 		if(prio_inh)
-			TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
+			TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
 		else
 			TRACE_TASK(t, "base priority restored.\n");
 
@@ -1016,7 +1015,7 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l)
 			TRACE_TASK(t, "is new hp_waiter.\n");
 
 			if ((effective_priority(owner) == old_max_eff_prio) ||
-				(__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))){
+				(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
 				new_prio = new_max_eff_prio;
 			}
 		}
@@ -1126,7 +1125,7 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l)
 		{
 			// old_max_eff_prio > new_max_eff_prio
 
-			if(__edf_higher_prio(new_max_eff_prio, BASE, t, INHERITED)) {
+			if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) {
 				TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d  task: %s/%d [%s/%d]\n",
 						   new_max_eff_prio->comm, new_max_eff_prio->pid,
 						   t->comm, t->pid, tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid);
@@ -1271,15 +1270,15 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 		if(new_max_eff_prio != old_max_eff_prio) {
 			// new_max_eff_prio > old_max_eff_prio holds.
 			if ((effective_priority(owner) == old_max_eff_prio) ||
-				(__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))) {
+				(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
 
-				TRACE_TASK(new_max_eff_prio, "Propagating inheritance to holder of lock %d.\n", l->ident);
+				TRACE_CUR("Propagating inheritance to holder of lock %d.\n", l->ident);
 
 				// beware: recursion
 				nested_increase_priority_inheritance(owner, new_max_eff_prio, &mutex->lock, irqflags);  // unlocks mutex->lock
 			}
 			else {
-				TRACE_TASK(new_max_eff_prio, "Lower priority than holder %s/%d.  No propagation.\n", owner->comm, owner->pid);
+				TRACE_CUR("Lower priority than holder %s/%d.  No propagation.\n", owner->comm, owner->pid);
 				raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 				raw_spin_unlock_irqrestore(&mutex->lock, irqflags);
 			}
@@ -1350,14 +1349,26 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 
 		struct task_struct *decreased_prio;
 
-		TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", l->ident);
+		TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", l->ident);
 
 		if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
-			TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", l->ident);
+			TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n",
+					  (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
+					  (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
+					  owner->comm,
+					  owner->pid,
+					  l->ident);
+
 			decreased_prio = new_max_eff_prio;
 		}
 		else {
-			TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", l->ident);
+			TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n",
+					  (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
+					  (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
+					  owner->comm,
+					  owner->pid,
+					  l->ident);
+
 			decreased_prio = NULL;
 		}
 
@@ -1485,6 +1496,9 @@ static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct
 	ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node);
 	ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node);
 
+	BUG_ON(!d_a);
+	BUG_ON(!d_b);
+
 	return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE);
 }
 
@@ -1711,8 +1725,8 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struc
 
 	if(sem->top_m_size < sem->m) {
 		TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", t->comm, t->pid);
-		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
-		print_global_list(sem->top_m.root, 1);
+//		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
+//		print_global_list(sem->top_m.root, 1);
 
 		binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node);
 		++(sem->top_m_size);
@@ -1725,12 +1739,12 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struc
 
 		TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n",
 				  t->comm, t->pid,
-				  evicted->comm, evicted->pid);
+				  evicted->task->comm, evicted->task->pid);
 
-		TRACE_CUR("Not-Top-M Before:\n");
-		print_global_list(sem->not_top_m.root, 1);
-		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
-		print_global_list(sem->top_m.root, 1);
+//		TRACE_CUR("Not-Top-M Before:\n");
+//		print_global_list(sem->not_top_m.root, 1);
+//		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
+//		print_global_list(sem->top_m.root, 1);
 
 
 		binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node);
@@ -1746,8 +1760,8 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struc
 	}
 	else {
 		TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", t->comm, t->pid);
-		TRACE_CUR("Not-Top-M Before:\n");
-		print_global_list(sem->not_top_m.root, 1);
+//		TRACE_CUR("Not-Top-M Before:\n");
+//		print_global_list(sem->not_top_m.root, 1);
 
 		binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node);
 
@@ -1766,10 +1780,10 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struc
 	if(binheap_is_in_this_heap(&node->node, &sem->top_m)) {
 		TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid);
 
-		TRACE_CUR("Not-Top-M Before:\n");
-		print_global_list(sem->not_top_m.root, 1);
-		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
-		print_global_list(sem->top_m.root, 1);
+//		TRACE_CUR("Not-Top-M Before:\n");
+//		print_global_list(sem->not_top_m.root, 1);
+//		TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size);
+//		print_global_list(sem->top_m.root, 1);
 
 
 		binheap_delete(&node->node, &sem->top_m);
@@ -1777,7 +1791,7 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struc
 		if(!binheap_empty(&sem->not_top_m)) {
 			ikglp_heap_node_t *promoted = binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node);
 
-			TRACE_CUR("Promoting %s/%d to top-m\n", promoted->comm, promoted->pid);
+			TRACE_CUR("Promoting %s/%d to top-m\n", promoted->task->comm, promoted->task->pid);
 
 			binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node);
 			INIT_BINHEAP_NODE(&promoted->node);
@@ -1795,9 +1809,9 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struc
 		print_global_list(sem->not_top_m.root, 1);
 	}
 	else {
-		TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid);
-		TRACE_CUR("Not-Top-M Before:\n");
-		print_global_list(sem->not_top_m.root, 1);
+//		TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid);
+//		TRACE_CUR("Not-Top-M Before:\n");
+//		print_global_list(sem->not_top_m.root, 1);
 
 		binheap_delete(&node->node, &sem->not_top_m);
 
@@ -1842,9 +1856,9 @@ void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth
 
 static void ikglp_add_donees(struct ikglp_semaphore *sem, struct fifo_queue *fq, struct task_struct *t, ikglp_donee_heap_node_t* node)
 {
-	TRACE_CUR("Adding %s/%d to donor list.\n", t->comm, t->pid);
-	TRACE_CUR("donees Before:\n");
-	print_donees(sem, sem->donees.root, 1);
+//	TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid);
+//	TRACE_CUR("donees Before:\n");
+//	print_donees(sem, sem->donees.root, 1);
 
 	node->task = t;
 	node->donor_info = NULL;
@@ -1860,8 +1874,8 @@ static void ikglp_add_donees(struct ikglp_semaphore *sem, struct fifo_queue *fq,
 
 static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags)
 {
-	// hp_waiter has increased
-	if (edf_higher_prio(t, fq->hp_waiter)) {
+	// priority of 't' has increased (note: 't' might already be hp_waiter).
+	if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) {
 		struct task_struct *old_max_eff_prio;
 		struct task_struct *new_max_eff_prio;
 		struct task_struct *new_prio = NULL;
@@ -1895,7 +1909,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif
 			TRACE_TASK(t, "is new hp_waiter.\n");
 
 			if ((effective_priority(owner) == old_max_eff_prio) ||
-				(__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))){
+				(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
 				new_prio = new_max_eff_prio;
 			}
 		}
@@ -1926,7 +1940,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif
 		}
 	}
 	else {
-		TRACE_TASK(t, "no change in hp_waiter.\n");
+		TRACE_TASK(t, "hp_waiter is unaffected.\n");
 		raw_spin_unlock_irqrestore(&sem->lock, flags);
 	}
 }
@@ -1940,6 +1954,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg
 	struct task_struct *new_max_eff_prio;
 
 	if(!owner) {
+		TRACE_CUR("No owner.  Returning.\n");
 		raw_spin_unlock_irqrestore(&sem->lock, flags);
 		return;
 	}
@@ -1960,14 +1975,26 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg
 		// Need to set new effective_priority for owner
 		struct task_struct *decreased_prio;
 
-		TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", l->ident);
+		TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq));
 
 		if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
-			TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", l->ident);
+			TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n",
+					  (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
+					  (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
+					  owner->comm,
+					  owner->pid,
+					  ikglp_get_idx(sem, fq));
+
 			decreased_prio = new_max_eff_prio;
 		}
 		else {
-			TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", l->ident);
+			TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of fq %d.\n",
+					  (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
+					  (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
+					  owner->comm,
+					  owner->pid,
+					  ikglp_get_idx(sem, fq));
+
 			decreased_prio = NULL;
 		}
 
@@ -1975,6 +2002,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg
 		nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags);	// will unlock mutex->lock
 	}
 	else {
+		TRACE_TASK(owner, "No need to propagate priority decrease forward.\n");
 		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 		raw_spin_unlock_irqrestore(&sem->lock, flags);
 	}
@@ -2004,14 +2032,14 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo
 		// Need to set new effective_priority for owner
 		struct task_struct *decreased_prio;
 
-		TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", l->ident);
+		TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq));
 
 		if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
-			TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", l->ident);
+			TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq));
 			decreased_prio = new_max_eff_prio;
 		}
 		else {
-			TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", l->ident);
+			TRACE_CUR("has lesser base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq));
 			decreased_prio = NULL;
 		}
 
@@ -2019,6 +2047,7 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo
 		nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags);	// will unlock mutex->lock
 	}
 	else {
+		TRACE_TASK(owner, "No need to propagate priority decrease forward.\n");
 		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 		raw_spin_unlock_irqrestore(&sem->lock, flags);
 	}
@@ -2099,10 +2128,17 @@ static void __ikglp_enqueue_on_fq(
 
 	// update global list.
 	if(likely(global_heap_node)) {
+		if(binheap_is_in_heap(&global_heap_node->node)) {
+			WARN_ON(1);
+			ikglp_del_global_list(sem, t, global_heap_node);
+		}
 		ikglp_add_global_list(sem, t, global_heap_node);
 	}
 	// update donor eligiblity list.
 	if(likely(donee_heap_node)) {
+		if(binheap_is_in_heap(&donee_heap_node->node)) {
+			WARN_ON(1);
+		}
 		ikglp_add_donees(sem, fq, t, donee_heap_node);
 	}
 
@@ -2121,9 +2157,12 @@ static void ikglp_enqueue_on_fq(
 	unsigned long flags)
 {
 	/* resource is not free => must suspend and wait */
-	TRACE_TASK(t, "queue %d: Resource is not free => must suspend and wait.\n",
+	TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n",
 			   ikglp_get_idx(sem, fq));
 
+	INIT_BINHEAP_NODE(&wait->global_heap_node.node);
+	INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
+
 	__ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, &wait->global_heap_node, &wait->donee_heap_node);
 
 	ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags);  // unlocks sem->lock
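The INIT_BINHEAP_NODE() calls added in this hunk (and in the enqueue paths below) re-poison the nodes of a reused ikglp_wait_state_t before it is queued again; together with the binheap_is_in_heap()/WARN_ON guards in __ikglp_enqueue_on_fq(), a stale node left over from a previous wait can no longer be inserted into a heap twice. A sketch of the invariant being enforced (the helper name prepare_wait_state is hypothetical):

	static void prepare_wait_state(ikglp_wait_state_t *wait, struct task_struct *t)
	{
		wait->task = t;
		INIT_BINHEAP_NODE(&wait->global_heap_node.node);	/* parent = BINHEAP_POISON */
		INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
		/* any later binheap_is_in_heap() on these nodes now answers correctly,
		 * so double insertion trips a WARN_ON instead of corrupting a heap */
	}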
@@ -2133,12 +2172,20 @@ static void ikglp_enqueue_on_fq(
 static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait)
 {
 	TRACE_TASK(wait->task, "goes to PQ.\n");
 
-	INIT_BINHEAP_NODE(&wait->pq_node.node);
+	wait->pq_node.task = wait->task;  // copy over task (little redundant...)
 
 	binheap_add(&wait->pq_node.node, &sem->priority_queue, ikglp_heap_node_t, node);
 }
 
+static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait)
+{
+	INIT_BINHEAP_NODE(&wait->global_heap_node.node);
+	INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
+	INIT_BINHEAP_NODE(&wait->pq_node.node);
+
+	__ikglp_enqueue_on_pq(sem, wait);
+}
 
 void print_donors(struct binheap_node *n, int depth)
 {
@@ -2178,26 +2225,27 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state
 	struct task_struct *new_max_eff_prio;
 	struct task_struct *new_prio = NULL;
 
-	TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid);
-	TRACE_CUR("donors Before:\n");
-	print_donors(sem->donors.root, 1);
-
-
+	INIT_BINHEAP_NODE(&wait->global_heap_node.node);
+	INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
+	INIT_BINHEAP_NODE(&wait->pq_node.node);
+	INIT_BINHEAP_NODE(&wait->node);
+
+//	TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid);
+//	TRACE_CUR("donors Before:\n");
+//	print_donors(sem->donors.root, 1);
 
 	// Add donor to the global list.
 	ikglp_add_global_list(sem, t, &wait->global_heap_node);
 
-	INIT_BINHEAP_NODE(&wait->node);
-
 	// Select a donee
 	donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
 	donee = donee_node->task;
 
 	TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid);
 
-	TRACE_CUR("Temporarily removing %s/%d to donor list.\n", donee->comm, donee->pid);
-	TRACE_CUR("donees Before:\n");
-	print_donees(sem, sem->donees.root, 1);
+	TRACE_CUR("Temporarily removing %s/%d to donee list.\n", donee->comm, donee->pid);
+//	TRACE_CUR("donees Before:\n");
+//	print_donees(sem, sem->donees.root, 1);
 
 	binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node);  // will re-add it shortly
 
@@ -2245,8 +2293,8 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state
 	INIT_BINHEAP_NODE(&donee_node->node);
 
 
-	TRACE_CUR("Adding %s/%d back to donor list.\n", donee->comm, donee->pid);
-	TRACE_CUR("donees Before:\n");
+	TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid);
+//	TRACE_CUR("donees Before:\n");
 	print_donees(sem, sem->donees.root, 1);
 
 	binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node);
@@ -2268,20 +2316,20 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state
 
 	if(new_max_eff_prio != old_max_eff_prio) {
 		if ((effective_priority(donee) == old_max_eff_prio) ||
-			(__edf_higher_prio(new_max_eff_prio, BASE, donee, INHERITED))){
+			(__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){
 			TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", donee->comm, donee->pid);
 			new_prio = new_max_eff_prio;
 		}
-		else {
-			// should be bug.  donor would not be in top-m.
-			TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid);
-			WARN_ON(1);
-		}
-	}
-	else {
-		// should be bug.  donor would not be in top-m.
-		TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid);
-		WARN_ON(1);
+//		else {
+//			// should be bug.  donor would not be in top-m.
+//			TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid);
+//			WARN_ON(1);
+//		}
+//	}
+//	else {
+//		// should be bug.  donor would not be in top-m.
+//		TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid);
+//		WARN_ON(1);
 	}
 
 	if(new_prio) {
@@ -2320,6 +2368,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l)
 	struct ikglp_semaphore *sem = ikglp_from_lock(l);
 	unsigned long flags, real_flags;
 	struct fifo_queue *fq = NULL;
+	int replica = -EINVAL;
 
 	ikglp_wait_state_t wait;
 
@@ -2331,54 +2380,62 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l)
2331 2380
2332 if(sem->shortest_fifo_queue->count == 0) { 2381 if(sem->shortest_fifo_queue->count == 0) {
2333 // take available resource 2382 // take available resource
2383 replica = ikglp_get_idx(sem, sem->shortest_fifo_queue);
2384
2334 ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock 2385 ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock
2335 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); 2386 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2336 return 0;
2337 } 2387 }
2338 2388 else
2339 // we have to suspend. 2389 {
2340 2390 // we have to suspend.
2341 wait.task = t; // THIS IS CRITICALLY IMPORTANT!!!
2342
2343 tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked
2344 mb();
2345
2346 /* FIXME: interruptible would be nice some day */
2347 set_task_state(t, TASK_UNINTERRUPTIBLE);
2348
2349 if(sem->shortest_fifo_queue->count < sem->max_fifo_len) {
2350 // enqueue on fq
2351 ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock
2352 }
2353 else {
2354 // no room in fifos. Go to PQ or donors.
2355 2391
2356 if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { 2392 wait.task = t; // THIS IS CRITICALLY IMPORTANT!!!
2357 // enqueue on PQ 2393
2358 __ikglp_enqueue_on_pq(sem, &wait); 2394 tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked
2359 raw_spin_unlock_irqrestore(&sem->lock, flags); 2395 mb();
2396
2397 /* FIXME: interruptible would be nice some day */
2398 set_task_state(t, TASK_UNINTERRUPTIBLE);
2399
2400 if(sem->shortest_fifo_queue->count < sem->max_fifo_len) {
2401 // enqueue on fq
2402 ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock
2360 } 2403 }
2361 else { 2404 else {
2362 // enqueue as donor 2405
2363 ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock 2406 TRACE_CUR("IKGLP fifo queues are full.\n");
2407
2408 // no room in fifos. Go to PQ or donors.
2409
2410 if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) {
2411 // enqueue on PQ
2412 ikglp_enqueue_on_pq(sem, &wait);
2413 raw_spin_unlock_irqrestore(&sem->lock, flags);
2414 }
2415 else {
2416 // enqueue as donor
2417 ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock
2418 }
2364 } 2419 }
2420
2421 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2422
2423 TS_LOCK_SUSPEND;
2424
2425 schedule();
2426
2427 TS_LOCK_RESUME;
2428
2429 fq = ikglp_get_queue(sem, t);
2430 BUG_ON(!fq);
2431
2432 replica = ikglp_get_idx(sem, fq);
2365 } 2433 }
2366 2434
2367 raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
2368
2369 TS_LOCK_SUSPEND;
2370
2371 schedule();
2372
2373 TS_LOCK_RESUME;
2374
2375 fq = ikglp_get_queue(sem, t);
2376 BUG_ON(!fq);
2377
2378 TRACE_CUR("Acquired lock %d, queue %d\n", 2435 TRACE_CUR("Acquired lock %d, queue %d\n",
2379 l->ident, ikglp_get_idx(sem, fq)); 2436 l->ident, replica);
2380 2437
2381 return 0; 2438 return replica;
2382} 2439}
2383 2440
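[Note: since the function now returns the acquired replica index rather than a bare 0, callers can learn which resource instance they hold. A minimal caller-side sketch, assuming the return value propagates unchanged through the locking syscall; that plumbing is not part of this patch, and the helper names below are hypothetical:]

	int replica = acquire_ikglp(od);   /* hypothetical wrapper around the lock call */
	if (replica < 0)
		return replica;            /* e.g. -EINVAL from argument checks */
	use_instance(replica);             /* hypothetical; 0 <= replica < nr_replicas */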
2384static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *donor_info) 2441static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *donor_info)
@@ -2387,14 +2444,15 @@ static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queu
2387 2444
2388 TRACE_CUR("Donor %s/%d being moved to fq %d\n", 2445 TRACE_CUR("Donor %s/%d being moved to fq %d\n",
2389 t->comm, 2446 t->comm,
2390 t->pid; 2447 t->pid,
2391 ikglp_get_idx(sem, fq)); 2448 ikglp_get_idx(sem, fq));
2392 2449
2393 binheap_delete(&donor_info->node, &sem->donors); 2450 binheap_delete(&donor_info->node, &sem->donors);
2394 2451
2395 __ikglp_enqueue_on_fq(sem, fq, t, 2452 __ikglp_enqueue_on_fq(sem, fq, t,
2396 &donor_info->fq_node, 2453 &donor_info->fq_node,
2397 &donor_info->global_heap_node, 2454 NULL, // already in global_list, so pass NULL to avoid adding it a second time.
2455 //&donor_info->global_heap_node,
2398 &donor_info->donee_heap_node); 2456 &donor_info->donee_heap_node);
2399 2457
2400 // warning: 2458 // warning:
@@ -2407,7 +2465,7 @@ static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *
2407 2465
2408 TRACE_CUR("PQ request %s/%d being moved to fq %d\n", 2466 TRACE_CUR("PQ request %s/%d being moved to fq %d\n",
2409 t->comm, 2467 t->comm,
2410 t->pid; 2468 t->pid,
2411 ikglp_get_idx(sem, fq)); 2469 ikglp_get_idx(sem, fq));
2412 2470
2413 binheap_delete(&wait->pq_node.node, &sem->priority_queue); 2471 binheap_delete(&wait->pq_node.node, &sem->priority_queue);
@@ -2425,35 +2483,69 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(struct ikglp_semaphore*
2425 /* must hold sem->lock */ 2483 /* must hold sem->lock */
2426 2484
2427 struct fifo_queue *fq = NULL; 2485 struct fifo_queue *fq = NULL;
2428 struct task_struct *max_hp = NULL;
2429
2430 struct list_head *pos; 2486 struct list_head *pos;
2431 struct task_struct *queued; 2487 struct task_struct *queued;
2432 int i; 2488 int i;
2433 2489
2434 for(i = 0; i < sem->nr_replicas; ++i) 2490 for(i = 0; i < sem->nr_replicas; ++i) {
2435 {
2436 if( (sem->fifo_queues[i].count > 1) && 2491 if( (sem->fifo_queues[i].count > 1) &&
2437 ((fq == NULL) || 2492 (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) {
2438 (edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter))) ) 2493
2439 { 2494 TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n",
2495 ikglp_get_idx(sem, &sem->fifo_queues[i]),
2496 sem->fifo_queues[i].hp_waiter->comm,
2497 sem->fifo_queues[i].hp_waiter->pid,
2498 (fq) ? ikglp_get_idx(sem, fq) : -1,
2499 (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "nil") : "nilXX",
2500 (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->pid : -1) : -2);
2501
2440 fq = &sem->fifo_queues[i]; 2502 fq = &sem->fifo_queues[i];
2503
2504 WARN_ON(!(fq->hp_waiter));
2441 } 2505 }
2442 } 2506 }
2443 2507
2444 if(fq) { 2508 if(fq) {
2445 list_for_each(pos, &fq->wait.task_list) 2509 struct task_struct *max_hp = fq->hp_waiter;
2446 { 2510 ikglp_wait_state_t* ret = NULL;
2511
2512 TRACE_CUR("Searching for %s/%d on fq %d\n",
2513 max_hp->comm,
2514 max_hp->pid,
2515 ikglp_get_idx(sem, fq));
2516
2517 BUG_ON(!max_hp);
2518
2519 list_for_each(pos, &fq->wait.task_list) {
2447 wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); 2520 wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list);
2448 2521
2449 queued = (struct task_struct*) wait->private; 2522 queued = (struct task_struct*) wait->private;
2523
2524 TRACE_CUR("fq %d entry: %s/%d\n", ikglp_get_idx(sem, fq), queued->comm, queued->pid);
2525
2450 /* Compare task prios, find high prio task. */ 2526 /* Compare task prios, find high prio task. */
2451 if (queued == max_hp) 2527 if (queued == max_hp) {
2452 { 2528 TRACE_CUR("Found it!\n");
2453 return container_of(wait, ikglp_wait_state_t, fq_node); 2529 ret = container_of(wait, ikglp_wait_state_t, fq_node);
2454 } 2530 }
2531 }
2532
2533// list_for_each(pos, &fq->wait.task_list) {
2534// wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list);
2535//
2536// queued = (struct task_struct*) wait->private;
2537// /* Compare task prios, find high prio task. */
2538// if (queued == max_hp) {
2539// TRACE_CUR("Found it!\n");
2540// return container_of(wait, ikglp_wait_state_t, fq_node);
2541// }
2542// }
2543
2544 if(!ret) {
2545 WARN_ON(1);
2455 } 2546 }
2456 BUG(); 2547
2548 return ret;
2457 } 2549 }
2458 2550
2459 return(NULL); 2551 return(NULL);
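[Note: the rewritten ikglp_find_hp_waiter_to_steal() is now clearly two-phase: an O(k) scan picks, among queues holding more than one request, the queue whose hp_waiter has the highest priority; a walk of that queue's wait list then maps hp_waiter back to its ikglp_wait_state_t. A condensed sketch of the first phase, assuming, as the code above does, that edf_higher_prio() tolerates a NULL argument:]

	struct fifo_queue *victim = NULL;
	int i;

	for (i = 0; i < sem->nr_replicas; ++i) {
		/* only queues that still hold a waiter besides the owner are eligible */
		if (sem->fifo_queues[i].count > 1 &&
		    (!victim ||
		     edf_higher_prio(sem->fifo_queues[i].hp_waiter,
		                     victim->hp_waiter)))
			victim = &sem->fifo_queues[i];
	}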
@@ -2468,14 +2560,21 @@ static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq
2468 2560
2469 TRACE_CUR("FQ request %s/%d being moved to fq %d\n", 2561 TRACE_CUR("FQ request %s/%d being moved to fq %d\n",
2470 t->comm, 2562 t->comm,
2471 t->pid; 2563 t->pid,
2472 ikglp_get_idx(sem, fq)); 2564 ikglp_get_idx(sem, fq));
2473 2565
2474 fq_wait->donee_heap_node.fq = fq; // just to be safe 2566 fq_wait->donee_heap_node.fq = fq; // just to be safe
2475 2567
2476 2568
2477 __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node); 2569 __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node);
2478 --fq_steal->count; 2570 --(fq_steal->count);
2571
2572 fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL);
2573 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
2574 ikglp_get_idx(sem, fq_steal),
2575 (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "nil",
2576 (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : -1);
2577
2479 2578
2480 // Update shortest. 2579 // Update shortest.
2481 if(fq_steal->count < sem->shortest_fifo_queue->count) { 2580 if(fq_steal->count < sem->shortest_fifo_queue->count) {
@@ -2497,6 +2596,8 @@ static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, st
2497 2596
2498 BUG_ON(old_wait->donee_heap_node.fq != fq); 2597 BUG_ON(old_wait->donee_heap_node.fq != fq);
2499 2598
2599 TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n", ikglp_get_idx(sem, fq));
2600
2500 // need to migrate global_heap_node and donee_heap_node off of the stack 2601 // need to migrate global_heap_node and donee_heap_node off of the stack
2501 // to the nodes allocated for the owner of this fq. 2602 // to the nodes allocated for the owner of this fq.
2502 2603
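[Note: the body of the migration is outside this hunk, but the reason for it bears noting; the sketch below is editorial, inferred from the surrounding code rather than taken from the patch:]

	/*
	 * A waiter's ikglp_wait_state_t lives on its own stack while it is
	 * blocked (see 'wait' in gsnedf_ikglp_lock()). Once that task becomes
	 * the owner and returns from schedule(), its frame is reused, yet the
	 * nodes are still linked into the semaphore's heaps; they must first
	 * be moved into fq->global_heap_node / fq->donee_heap_node, which the
	 * queue owns for the lifetime of the request.
	 */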
@@ -2524,6 +2625,8 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2524 struct task_struct *t = current; 2625 struct task_struct *t = current;
2525 struct task_struct *donee = NULL; 2626 struct task_struct *donee = NULL;
2526 struct task_struct *next = NULL; 2627 struct task_struct *next = NULL;
2628 struct task_struct *new_on_fq = NULL;
2629
2527 ikglp_wait_state_t *other_donor_info = NULL; 2630 ikglp_wait_state_t *other_donor_info = NULL;
2528 struct fifo_queue *to_steal = NULL; 2631 struct fifo_queue *to_steal = NULL;
2529 struct fifo_queue *fq; 2632 struct fifo_queue *fq;
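[Note: before the individual unlock hunks, the overall shape of the reworked gsnedf_ikglp_unlock() is worth keeping in mind; the sequence below is a sketch of what the following hunks implement:]

	/*
	 * 1. drop t from the global list and from sem->donees;
	 * 2. pull the next request into this FIFO queue, preferring t's own
	 *    donor, then the top donor, then the top of the priority queue,
	 *    then stealing another queue's hp_waiter;
	 * 3. release the donations t received, and defer the fq->owner /
	 *    fq->count / sem->shortest_fifo_queue updates to reduce thrashing;
	 * 4. re-check the priorities of any owner or waiter affected by
	 *    step 2, and refresh fq->hp_waiter if a new task arrived;
	 * 5. make the head of fq->wait the owner and wake it.
	 */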
@@ -2542,14 +2645,23 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2542 goto out; 2645 goto out;
2543 } 2646 }
2544 2647
2545 TRACE_TASK(t, "Freeing replica %d.\n", ikglp_to_idx(sem, fq)); 2648 TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq));
2649
2650
2651 // Remove 't' from the heaps, but data in nodes will still be good.
2652 ikglp_del_global_list(sem, t, &fq->global_heap_node);
2653 binheap_delete(&fq->donee_heap_node.node, &sem->donees);
2546 2654
2655 // Move the next request into the FQ and update heaps as needed.
2656 // We defer re-evaluation of priorities to later in the function.
2547 if(fq->donee_heap_node.donor_info) { // move my donor to FQ 2657 if(fq->donee_heap_node.donor_info) { // move my donor to FQ
2548 ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; 2658 ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info;
2549 2659
2660 new_on_fq = donor_info->task;
2661
2550 TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n", 2662 TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n",
2551 donor_info->task->comm, donor_info->task->pid, 2663 new_on_fq->comm, new_on_fq->pid,
2552 ikglp_to_idx(sem, fq)); 2664 ikglp_get_idx(sem, fq));
2553 // donor moved to FQ 2665 // donor moved to FQ
2554 donee = t; 2666 donee = t;
2555 ikglp_move_donor_to_fq(sem, fq, donor_info); 2667 ikglp_move_donor_to_fq(sem, fq, donor_info);
@@ -2559,11 +2671,17 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2559 // move other donor to FQ 2671 // move other donor to FQ
2560 other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 2672 other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
2561 2673
2674 new_on_fq = other_donor_info->task;
2562 donee = other_donor_info->donee_info->task; 2675 donee = other_donor_info->donee_info->task;
2676
2677 // update the donee's heap position.
2678 other_donor_info->donee_info->donor_info = NULL; // clear the cross-link
2679 binheap_decrease(&other_donor_info->donee_info->node, &sem->donees);
2563 2680
2681
2564 TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n", 2682 TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n",
2565 other_donor_info->task->comm, other_donor_info->task->pid, 2683 new_on_fq->comm, new_on_fq->pid,
2566 ikglp_to_idx(sem, fq)); 2684 ikglp_get_idx(sem, fq));
2567 2685
2568 ikglp_move_donor_to_fq(sem, fq, other_donor_info); 2686 ikglp_move_donor_to_fq(sem, fq, other_donor_info);
2569 2687
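[Note: one subtlety in the hunk above: the donee's cross-link is cleared before binheap_decrease() re-heapifies it, presumably because the donee's key in sem->donees depends on whether it still has a donor. The added pattern is:]

	other_donor_info->donee_info->donor_info = NULL;  /* break the cross-link */
	binheap_decrease(&other_donor_info->donee_info->node,
	                 &sem->donees);                   /* restore heap order   */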
@@ -2573,9 +2691,11 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2573 ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, ikglp_heap_node_t, node); 2691 ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, ikglp_heap_node_t, node);
2574 ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, pq_node); 2692 ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, pq_node);
2575 2693
2694 new_on_fq = pq_wait->task;
2695
2576 TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n", 2696 TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n",
2577 pq_wait->task->comm, pq_wait->task->pid, 2697 new_on_fq->comm, new_on_fq->pid,
2578 ikglp_to_idx(sem, fq)); 2698 ikglp_get_idx(sem, fq));
2579 2699
2580 ikglp_move_pq_to_fq(sem, fq, pq_wait); 2700 ikglp_move_pq_to_fq(sem, fq, pq_wait);
2581 } 2701 }
@@ -2584,16 +2704,18 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2584 ikglp_wait_state_t *fq_wait; 2704 ikglp_wait_state_t *fq_wait;
2585 2705
2586 TRACE_TASK(t, "Looking to steal a request for fq %d...\n", 2706 TRACE_TASK(t, "Looking to steal a request for fq %d...\n",
2587 ikglp_to_idx(sem, fq)); 2707 ikglp_get_idx(sem, fq));
2588 2708
2589 fq_wait = ikglp_find_hp_waiter_to_steal(sem); 2709 fq_wait = ikglp_find_hp_waiter_to_steal(sem);
2590 if(fq_wait) { 2710 if(fq_wait) {
2591 to_steal = fq_wait->donee_heap_node.fq; 2711 to_steal = fq_wait->donee_heap_node.fq;
2592 2712
2713 new_on_fq = fq_wait->task;
2714
2593 TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", 2715 TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n",
2594 fq_wait->task->comm, fq_wait->task->pid, 2716 new_on_fq->comm, new_on_fq->pid,
2595 ikglp_to_idx(sem, to_steal) 2717 ikglp_get_idx(sem, to_steal),
2596 ikglp_to_idx(sem, fq)); 2718 ikglp_get_idx(sem, fq));
2597 2719
2598 ikglp_steal_to_fq(sem, fq, fq_wait); 2720 ikglp_steal_to_fq(sem, fq, fq_wait);
2599 2721
@@ -2601,7 +2723,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2601 } 2723 }
2602 else { 2724 else {
2603 TRACE_TASK(t, "Found nothing to steal for fq %d.\n", 2725 TRACE_TASK(t, "Found nothing to steal for fq %d.\n",
2604 ikglp_to_idx(sem, fq)); 2726 ikglp_get_idx(sem, fq));
2605 } 2727 }
2606 } 2728 }
2607 else { // move no one 2729 else { // move no one
@@ -2622,9 +2744,16 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2622 } 2744 }
2623 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); 2745 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
2624 2746
2625 ikglp_del_global_list(sem, t, &fq->global_heap_node); 2747
2626 binheap_delete(&fq->donee_heap_node.node, &sem->donees); 2748 // Updating the owner and updating sem->shortest_fifo_queue
2749 // could have been done sooner, but it is deferred in the hope
2750 // that this reduces thrashing of the sem->shortest_fifo_queue
2751 // assignment.
2627 fq->owner = NULL; // no longer owned!! 2752 fq->owner = NULL; // no longer owned!!
2753 --(fq->count);
2754 if(fq->count < sem->shortest_fifo_queue->count) {
2755 sem->shortest_fifo_queue = fq;
2756 }
2628 2757
2629 // Now patch up other priorities. 2758 // Now patch up other priorities.
2630 // 2759 //
@@ -2649,7 +2778,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2649 if(donee == other_fq->owner) { 2778 if(donee == other_fq->owner) {
2650 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", 2779 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
2651 donee->comm, donee->pid, 2780 donee->comm, donee->pid,
2652 ikglp_to_idx(sem, other_fq)); 2781 ikglp_get_idx(sem, other_fq));
2653 2782
2654 ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); 2783 ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags);
2655 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! 2784 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!!
@@ -2657,15 +2786,20 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2657 else { 2786 else {
2658 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", 2787 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n",
2659 donee->comm, donee->pid, 2788 donee->comm, donee->pid,
2660 ikglp_to_idx(sem, other_fq)); 2789 ikglp_get_idx(sem, other_fq));
2661 2790
2662 ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node); 2791 ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node);
2663 if(donee == other_fq->hp_waiter) { 2792 if(donee == other_fq->hp_waiter) {
2664 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n", 2793 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
2665 donee->comm, donee->pid, 2794 donee->comm, donee->pid,
2666 ikglp_to_idx(sem, other_fq)); 2795 ikglp_get_idx(sem, other_fq));
2667 2796
2668 other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); 2797 other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL);
2798 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
2799 ikglp_get_idx(sem, other_fq),
2800 (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "nil",
2801 (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1);
2802
2669 ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. 2803 ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it.
2670 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! 2804 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!!
2671 } 2805 }
@@ -2673,38 +2807,57 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2673 } 2807 }
2674 else if(to_steal) { 2808 else if(to_steal) {
2675 TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n", 2809 TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n",
2676 ikglp_to_idx(sem, to_steal)); 2810 ikglp_get_idx(sem, to_steal));
2677 2811
2678 ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. 2812 ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it.
2679 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! 2813 raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!!
2680 } 2814 }
2681 2815
2816 // check for new HP waiter.
2817 if(new_on_fq) {
2818 // fq->owner is null, so just update the hp_waiter without locking.
2819
2820 if(new_on_fq == fq->hp_waiter) {
2821 TRACE_TASK(t, "new_on_fq (%s/%d) is already hp_waiter.\n", fq->hp_waiter->comm, fq->hp_waiter->pid);
2822 fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure...
2823 }
2824 else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) {
2825 if(fq->hp_waiter)
2826 TRACE_TASK(t, "new_on_fq has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid);
2827 else
2828 TRACE_TASK(t, "new_on_fq has higher prio than hp_waiter (NIL).\n");
2829
2830 fq->hp_waiter = new_on_fq;
2831 fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
2832
2833 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
2834 ikglp_get_idx(sem, fq),
2835 (fq->hp_waiter) ? fq->hp_waiter->comm : "nil",
2836 (fq->hp_waiter) ? fq->hp_waiter->pid : -1);
2837 }
2838 }
2839
2682 2840
2683 if(waitqueue_active(&fq->wait)) 2841 if(waitqueue_active(&fq->wait))
2684 { 2842 {
2685 wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); 2843 wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
2686 ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); 2844 ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node);
2845 next = (struct task_struct*) wait->private;
2687 2846
2688 next = __waitqueue_remove_first(&fq->wait); 2847 __remove_wait_queue(&fq->wait, wait);
2689 2848
2690 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", 2849 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
2691 ikglp_get_idx(sem, fq), 2850 ikglp_get_idx(sem, fq),
2692 next->comm, next->pid); 2851 next->comm, next->pid);
2693 2852
2853 // migrate wait-state to fifo-memory.
2694 ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); 2854 ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
2695 2855
2696 /* next becomes the resource holder */ 2856 /* next becomes the resource holder */
2697 fq->owner = next; 2857 fq->owner = next;
2698 --fq->count;
2699 tsk_rt(next)->blocked_lock = NULL; 2858 tsk_rt(next)->blocked_lock = NULL;
2700
2701 // TODO: UPDATE TO USE NEW HEAP NODES.
2702 2859
2703 if(fq->count < sem->shortest_fifo_queue->count) {
2704 sem->shortest_fifo_queue = fq;
2705 }
2706 2860
2707
2708 /* determine new hp_waiter if necessary */ 2861 /* determine new hp_waiter if necessary */
2709 if (next == fq->hp_waiter) { 2862 if (next == fq->hp_waiter) {
2710 2863
@@ -2713,11 +2866,16 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2713 * inherit. However, we need to make sure that the 2866 * inherit. However, we need to make sure that the
2714 * next-highest priority in the queue is reflected in 2867 * next-highest priority in the queue is reflected in
2715 * hp_waiter. */ 2868 * hp_waiter. */
2716 fq->hp_waiter = ikglp_find_hp_waiter(fq, next); 2869 fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
2870 TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n",
2871 ikglp_get_idx(sem, fq),
2872 (fq->hp_waiter) ? fq->hp_waiter->comm : "nil",
2873 (fq->hp_waiter) ? fq->hp_waiter->pid : -1);
2874
2717 fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL; 2875 fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL;
2718 2876
2719 if (fq->hp_waiter) 2877 if (fq->hp_waiter)
2720 TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); 2878 TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n");
2721 else 2879 else
2722 TRACE("no further waiters\n"); 2880 TRACE("no further waiters\n");
2723 2881
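[Note: why the skip argument changed from next to NULL in the hunk above: next was already detached from fq->wait via __remove_wait_queue() earlier in this function, so the re-scan can no longer encounter it and nothing needs to be skipped:]

	/* earlier in the unlock path: */
	__remove_wait_queue(&fq->wait, wait);            /* 'next' leaves fq->wait */
	/* hence, when re-evaluating the queue: */
	fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);  /* no task to skip */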
@@ -2737,7 +2895,10 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2737 /* Well, if 'next' is not the highest-priority waiter, 2895 /* Well, if 'next' is not the highest-priority waiter,
2738 * then it (probably) ought to inherit the highest-priority 2896 * then it (probably) ought to inherit the highest-priority
2739 * waiter's priority. */ 2897 * waiter's priority. */
2740 TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); 2898 TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n",
2899 ikglp_get_idx(sem, fq),
2900 (fq->hp_waiter) ? fq->hp_waiter->comm : "nil",
2901 (fq->hp_waiter) ? fq->hp_waiter->pid : -1);
2741 2902
2742 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); 2903 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
2743 2904
@@ -2748,23 +2909,26 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l)
2748 * because that update hasn't yet executed (update operation is 2909 * because that update hasn't yet executed (update operation is
2749 * probably blocked on mutex->lock). So only inherit if the top of 2910 * probably blocked on mutex->lock). So only inherit if the top of
2750 * 'next's top heap node is indeed the effective prio. of hp_waiter. 2911 * 'next's top heap node is indeed the effective prio. of hp_waiter.
2751 * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) 2912 * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
2752 * since the effective priority of hp_waiter can change (and the 2913 * since the effective priority of hp_waiter can change (and the
2753 * update has not made it to this lock).) 2914 * update has not made it to this lock).)
2754 */ 2915 */
2755 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) 2916 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == fq->nest.hp_waiter_eff_prio))
2756 { 2917 {
2757 increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); 2918 if(fq->nest.hp_waiter_eff_prio)
2919 increase_priority_inheritance(next, fq->nest.hp_waiter_eff_prio);
2920 else
2921 WARN_ON(1);
2758 } 2922 }
2759 2923
2760 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); 2924 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
2761 } 2925 }
2926
2927
2928 // wake up the new resource holder!
2929 wake_up_process(next);
2762 } 2930 }
2763 2931
2764
2765 /* wake up next */
2766 wake_up_process(next);
2767
2768out: 2932out:
2769 raw_spin_unlock_irqrestore(&sem->lock, flags); 2933 raw_spin_unlock_irqrestore(&sem->lock, flags);
2770 2934
@@ -2820,7 +2984,7 @@ static struct litmus_lock_ops gsnedf_ikglp_lock_ops = {
2820 .deallocate = gsnedf_ikglp_free, 2984 .deallocate = gsnedf_ikglp_free,
2821}; 2985};
2822 2986
2823static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code) 2987static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
2824{ 2988{
2825 struct ikglp_semaphore* sem; 2989 struct ikglp_semaphore* sem;
2826 int nr_replicas = 0; 2990 int nr_replicas = 0;
@@ -2828,24 +2992,20 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
2828 2992
2829 if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas))) 2993 if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas)))
2830 { 2994 {
2831 *ret_code = -EINVAL;
2832 return(NULL); 2995 return(NULL);
2833 } 2996 }
2834 if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas))) 2997 if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas)))
2835 { 2998 {
2836 *ret_code = -EINVAL;
2837 return(NULL); 2999 return(NULL);
2838 } 3000 }
2839 if(nr_replicas < 1) 3001 if(nr_replicas < 1)
2840 { 3002 {
2841 *ret_code = -EINVAL;
2842 return(NULL); 3003 return(NULL);
2843 } 3004 }
2844 3005
2845 sem = kmalloc(sizeof(*sem), GFP_KERNEL); 3006 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
2846 if(!sem) 3007 if(!sem)
2847 { 3008 {
2848 *ret_code = -ENOMEM;
2849 return NULL; 3009 return NULL;
2850 } 3010 }
2851 3011
@@ -2853,7 +3013,6 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
2853 if(!sem->fifo_queues) 3013 if(!sem->fifo_queues)
2854 { 3014 {
2855 kfree(sem); 3015 kfree(sem);
2856 *ret_code = -ENOMEM;
2857 return NULL; 3016 return NULL;
2858 } 3017 }
2859 3018
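[Note: with the ret_code out-parameter gone, gsnedf_new_ikglp() signals failure solely by returning NULL, collapsing the former -EINVAL/-ENOMEM distinction. A caller-side sketch; the actual errno chosen by gsnedf_allocate_lock() is outside the hunks shown, so the value below is an assumption:]

	*lock = gsnedf_new_ikglp(args);
	if (*lock == NULL)
		err = -ENOMEM;  /* assumed: NULL no longer says which error occurred */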
@@ -2871,9 +3030,14 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
2871 raw_spin_lock_init(&sem->real_lock); 3030 raw_spin_lock_init(&sem->real_lock);
2872 3031
2873 sem->nr_replicas = nr_replicas; 3032 sem->nr_replicas = nr_replicas;
2874 sem->m = num_online_cpus(); 3033 sem->m = num_online_cpus(); // default 'm' to number of CPUs.
2875 sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0); 3034 sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0);
2876 3035
3036 TRACE("New IKGLP Sem: m = %d, k = %d, max fifo_len = %d\n",
3037 sem->m,
3038 sem->nr_replicas,
3039 sem->max_fifo_len);
3040
2877 for(i = 0; i < nr_replicas; ++i) 3041 for(i = 0; i < nr_replicas; ++i)
2878 { 3042 {
2879 struct fifo_queue* q = &(sem->fifo_queues[i]); 3043 struct fifo_queue* q = &(sem->fifo_queues[i]);
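[Note: the max_fifo_len expression above is integer ceiling division, bounding each FIFO queue at ceil(m/k) slots. Worked example:]

	/*
	 * max_fifo_len = (m / k) + ((m % k) != 0)  ==  ceil(m / k)
	 * e.g. m = 8 CPUs, k = 3 replicas:
	 *         8/3 + (8%3 != 0)  =  2 + 1  =  3 slots per queue,
	 * so the k = 3 queues hold at most 9 >= m queued requests in total.
	 */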
@@ -2908,7 +3072,6 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
2908 INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); 3072 INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order);
2909 INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); 3073 INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order);
2910 3074
2911 *ret_code = 0;
2912 return &sem->litmus_lock; 3075 return &sem->litmus_lock;
2913} 3076}
2914 3077
@@ -3155,7 +3318,7 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
3155 break; 3318 break;
3156 3319
3157 case IKGLP_SEM: 3320 case IKGLP_SEM:
3158 *lock = gsnedf_new_ikglp(args, &err); 3321 *lock = gsnedf_new_ikglp(args);
3159 break; 3322 break;
3160#endif 3323#endif
3161 3324
@@ -3189,7 +3352,6 @@ static long gsnedf_activate_plugin(void)
3189 for_each_online_cpu(cpu) { 3352 for_each_online_cpu(cpu) {
3190 entry = &per_cpu(gsnedf_cpu_entries, cpu); 3353 entry = &per_cpu(gsnedf_cpu_entries, cpu);
3191 INIT_BINHEAP_NODE(&entry->hn); 3354 INIT_BINHEAP_NODE(&entry->hn);
3192 entry->in_heap = 0;
3193 entry->linked = NULL; 3355 entry->linked = NULL;
3194 entry->scheduled = NULL; 3356 entry->scheduled = NULL;
3195#ifdef CONFIG_RELEASE_MASTER 3357#ifdef CONFIG_RELEASE_MASTER
@@ -3232,13 +3394,12 @@ static int __init init_gsn_edf(void)
3232 3394
3233 INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); 3395 INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
3234 /* initialize CPU state */ 3396 /* initialize CPU state */
3235 for (cpu = 0; cpu < NR_CPUS; cpu++) { 3397 for (cpu = 0; cpu < NR_CPUS; ++cpu) {
3236 entry = &per_cpu(gsnedf_cpu_entries, cpu); 3398 entry = &per_cpu(gsnedf_cpu_entries, cpu);
3237 gsnedf_cpus[cpu] = entry; 3399 gsnedf_cpus[cpu] = entry;
3238 entry->cpu = cpu; 3400 entry->cpu = cpu;
3239 3401
3240 INIT_BINHEAP_NODE(&entry->hn); 3402 INIT_BINHEAP_NODE(&entry->hn);
3241 entry->in_heap = 0;
3242 } 3403 }
3243 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); 3404 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
3244 return register_sched_plugin(&gsn_edf_plugin); 3405 return register_sched_plugin(&gsn_edf_plugin);
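[Note: both removals of entry->in_heap suggest the external membership flag became redundant: after the binheap rework, INIT_BINHEAP_NODE() evidently leaves a node in a well-defined "not in any heap" state that the heap code itself can test. The per-CPU init thus reduces to:]

	entry->cpu = cpu;
	INIT_BINHEAP_NODE(&entry->hn);  /* node starts out "not in any heap" */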