diff options
-rw-r--r-- | include/litmus/binheap.h | 536 | ||||
-rw-r--r-- | litmus/sched_cedf.c | 41 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 46 |
3 files changed, 578 insertions, 45 deletions
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h new file mode 100644 index 000000000000..fd4c0a318b96 --- /dev/null +++ b/include/litmus/binheap.h | |||
@@ -0,0 +1,536 @@ | |||
1 | #ifndef LITMUS_BINARY_HEAP_H | ||
2 | #define LITMUS_BINARY_HEAP_H | ||
3 | |||
4 | /** | ||
5 | * Simple binary heap with add, arbitrary delete, delete_root, and top | ||
6 | * operations. | ||
7 | * | ||
8 | * Style meant to conform with list.h. | ||
9 | * | ||
10 | * Motivation: Linux's prio_heap.h is of fixed size. Litmus's binomial | ||
11 | * heap may be overkill (and perhaps not general enough) for some applications. | ||
12 | * | ||
13 | * Note: In order to make node swaps fast, a node inserted with a data pointer | ||
14 | * may not always hold said data pointer. This is similar to the binomial heap | ||
15 | * implementation. This does make node deletion tricky since we have to | ||
16 | * (1) locate the node that holds the data pointer to delete, and (2) the | ||
17 | * node that was originally inserted with said data pointer. These have to be | ||
18 | * coalesced into a single node before removal (see usage of | ||
19 | * __binheap_safe_swap()). We have to track node references to accomplish this. | ||
20 | */ | ||
21 | |||
/* A node of the complete binary tree. Heap swap operations exchange only
 * the 'data' pointers of two nodes (see __binheap_swap()), so the node
 * that currently stores a given data pointer may differ from the node that
 * was inserted with it; 'ref'/'ref_ptr' track that association. */
struct binheap_node {
	void *data;
	struct binheap_node *parent;
	struct binheap_node *left;
	struct binheap_node *right;

	/* pointer to binheap_node that holds *data for which this binheap_node
	 * was originally inserted. (*data "owns" this node)
	 */
	struct binheap_node *ref;
	/* indirection maintained by __update_ref() so the owner's 'ref' can be
	 * re-targeted in O(1) each time two nodes exchange data pointers. */
	struct binheap_node **ref_ptr;
};
34 | |||
35 | /** | ||
 * Signature of comparator function. Assumed 'less-than' (min-heap).
37 | * Pass in 'greater-than' for max-heap. | ||
38 | * | ||
39 | * TODO: Consider macro-based implementation that allows comparator to be | ||
40 | * inlined (similar to Linux red/black tree) for greater efficiency. | ||
41 | */ | ||
42 | typedef int (*binheap_order_t)(struct binheap_node *a, | ||
43 | struct binheap_node *b); | ||
44 | |||
45 | |||
/* Heap handle: tracks the root, the insertion point, and the last node of
 * the complete binary tree, plus the ordering predicate. */
struct binheap_handle {
	struct binheap_node *root;

	/* pointer to node to take next inserted child */
	struct binheap_node *next;

	/* pointer to last node in complete binary tree */
	struct binheap_node *last;

	/* comparator function pointer */
	binheap_order_t compare;
};
58 | |||
59 | |||
60 | /** | ||
61 | * binheap_entry - get the struct for this heap node. | ||
62 | * Only valid when called upon heap nodes other than the root handle. | ||
63 | * @ptr: the heap node. | ||
64 | * @type: the type of struct pointed to by binheap_node::data. | ||
65 | * @member: unused. | ||
66 | */ | ||
67 | #define binheap_entry(ptr, type, member) \ | ||
68 | ((type *)((ptr)->data)) | ||
69 | |||
70 | /** | ||
71 | * binheap_node_container - get the struct that contains this node. | ||
72 | * Only valid when called upon heap nodes other than the root handle. | ||
73 | * @ptr: the heap node. | ||
74 | * @type: the type of struct the node is embedded in. | ||
75 | * @member: the name of the binheap_struct within the (type) struct. | ||
76 | */ | ||
77 | #define binheap_node_container(ptr, type, member) \ | ||
78 | container_of((ptr), type, member) | ||
79 | |||
80 | /** | ||
81 | * binheap_top_entry - get the struct for the node at the top of the heap. | ||
82 | * Only valid when called upon the heap handle node. | ||
83 | * @ptr: the special heap-handle node. | ||
84 | * @type: the type of the struct the head is embedded in. | ||
85 | * @member: the name of the binheap_struct within the (type) struct. | ||
86 | */ | ||
87 | #define binheap_top_entry(ptr, type, member) \ | ||
88 | binheap_entry((ptr)->root, type, member) | ||
89 | |||
90 | /** | ||
91 | * binheap_delete_root - remove the root element from the heap. | ||
92 | * @handle: handle to the heap. | ||
93 | * @type: the type of the struct the head is embedded in. | ||
94 | * @member: the name of the binheap_struct within the (type) struct. | ||
95 | */ | ||
96 | #define binheap_delete_root(handle, type, member) \ | ||
97 | __binheap_delete_root((handle), &((type *)((handle)->root->data))->member) | ||
98 | |||
99 | /** | ||
100 | * binheap_delete - remove an arbitrary element from the heap. | ||
101 | * @to_delete: pointer to node to be removed. | ||
102 | * @handle: handle to the heap. | ||
103 | */ | ||
104 | #define binheap_delete(to_delete, handle) \ | ||
105 | __binheap_delete((to_delete), (handle)) | ||
106 | |||
107 | /** | ||
108 | * binheap_add - insert an element to the heap | ||
109 | * new_node: node to add. | ||
110 | * @handle: handle to the heap. | ||
111 | * @type: the type of the struct the head is embedded in. | ||
112 | * @member: the name of the binheap_struct within the (type) struct. | ||
113 | */ | ||
114 | #define binheap_add(new_node, handle, type, member) \ | ||
115 | __binheap_add((new_node), (handle), container_of((new_node), type, member)) | ||
116 | |||
117 | /** | ||
118 | * binheap_decrease - re-eval the position of a node (based upon its | ||
119 | * original data pointer). | ||
120 | * @handle: handle to the heap. | ||
121 | * @orig_node: node that was associated with the data pointer | ||
122 | * (whose value has changed) when said pointer was | ||
123 | * added to the heap. | ||
124 | */ | ||
125 | #define binheap_decrease(orig_node, handle) \ | ||
126 | __binheap_decrease((orig_node), (handle)) | ||
127 | |||
128 | #define BINHEAP_NODE_INIT() { NULL, NULL, NULL, NULL , NULL, NULL} | ||
129 | |||
130 | #define BINHEAP_NODE(name) \ | ||
131 | struct binheap_node name = BINHEAP_NODE_INIT() | ||
132 | |||
133 | |||
134 | static inline void INIT_BINHEAP_NODE(struct binheap_node *n) | ||
135 | { | ||
136 | n->data = NULL; | ||
137 | n->parent = NULL; | ||
138 | n->left = NULL; | ||
139 | n->right = NULL; | ||
140 | n->ref = NULL; | ||
141 | n->ref_ptr = NULL; | ||
142 | } | ||
143 | |||
144 | static inline void INIT_BINHEAP_HANDLE( | ||
145 | struct binheap_handle *handle, | ||
146 | binheap_order_t compare) | ||
147 | { | ||
148 | handle->root = NULL; | ||
149 | handle->next = NULL; | ||
150 | handle->last = NULL; | ||
151 | handle->compare = compare; | ||
152 | } | ||
153 | |||
154 | /* Returns true (1) if binheap is empty. */ | ||
155 | static inline int binheap_empty(struct binheap_handle *handle) | ||
156 | { | ||
157 | return(handle->root == NULL); | ||
158 | } | ||
159 | |||
160 | |||
/* Update the node reference pointers. Same logic as Litmus binomial heap.
 *
 * Called after 'parent' and 'child' exchange their data pointers:
 * re-targets each data's owner node (through ref_ptr) at the binheap_node
 * that now stores that data, then swaps the ref_ptr fields themselves so
 * the indirection keeps following the data on subsequent swaps. */
static inline void __update_ref(struct binheap_node *parent,
	struct binheap_node *child)
{
	*(parent->ref_ptr) = child;
	*(child->ref_ptr) = parent;

	swap(parent->ref_ptr, child->ref_ptr);
}
170 | |||
/* Swaps data between two nodes. The nodes themselves stay in place;
 * reference bookkeeping is kept consistent via __update_ref(). */
static inline void __binheap_swap(struct binheap_node *parent,
	struct binheap_node *child)
{
	swap(parent->data, child->data);
	__update_ref(parent, child);
}
178 | |||
/* Swaps memory and data between two nodes. Actual nodes swap instead of
 * just data. Needed when we delete nodes from the heap.
 *
 * Exchanges a's and b's positions in the tree: parent links, child links,
 * and any handle->next/last/root pointers that referred to either node.
 */
static inline void __binheap_swap_safe(struct binheap_handle *handle,
	struct binheap_node *a,
	struct binheap_node *b)
{
	swap(a->data, b->data);
	__update_ref(a, b);

	if((a->parent != NULL) && (a->parent == b->parent)) {
		/* special case: shared parent */
		swap(a->parent->left, a->parent->right);
	}
	else {
		/* Update pointers to swap parents. */

		if(a->parent) {
			if(a == a->parent->left) {
				a->parent->left = b;
			}
			else {
				a->parent->right = b;
			}
		}

		if(b->parent) {
			if(b == b->parent->left) {
				b->parent->left = a;
			}
			else {
				b->parent->right = a;
			}
		}

		swap(a->parent, b->parent);
	}

	/* swap children */
	/* NOTE(review): the nested ifs rely on the complete-tree shape — a
	 * node with a right child always has a left child, so a lone right
	 * child can never occur here. */

	if(a->left) {
		a->left->parent = b;

		if(a->right) {
			a->right->parent = b;
		}
	}

	if(b->left) {
		b->left->parent = a;

		if(b->right) {
			b->right->parent = a;
		}
	}

	swap(a->left, b->left);
	swap(a->right, b->right);


	/* update next/last/root pointers */

	if(a == handle->next) {
		handle->next = b;
	}
	else if(b == handle->next) {
		handle->next = a;
	}

	if(a == handle->last) {
		handle->last = b;
	}
	else if(b == handle->last) {
		handle->last = a;
	}

	if(a == handle->root) {
		handle->root = b;
	}
	else if(b == handle->root) {
		handle->root = a;
	}
}
262 | |||
263 | |||
/**
 * Update the pointer to the last node in the complete binary tree.
 * Called internally after the root node has been deleted.
 *
 * Walks from the old 'last' position: up while we are a left child,
 * across to the left sibling, then down-right as far as possible (with
 * one final step left) to reach the bottom-right-most remaining node.
 */
static inline void __binheap_update_last(struct binheap_handle *handle)
{
	struct binheap_node *temp = handle->last;

	/* find a "bend" in the tree. */
	while(temp->parent && (temp == temp->parent->left)) {
		temp = temp->parent;
	}

	/* step over to sibling if we're not at root */
	if(temp->parent != NULL) {
		temp = temp->parent->left;
	}

	/* now travel right as far as possible. */
	while(temp->right != NULL) {
		temp = temp->right;
	}

	/* take one step to the left if we're not at the bottom-most level. */
	if(temp->left != NULL) {
		temp = temp->left;
	}

	/* temp is now a leaf: the new last node of the complete tree. */

	handle->last = temp;
}
296 | |||
/**
 * Update the pointer to the node that will take the next inserted node.
 * Called internally after a node has been inserted.
 *
 * Mirror image of __binheap_update_last(): up while we are a right child,
 * across to the right sibling, then down-left to the left-most node with
 * an open child slot.
 */
static inline void __binheap_update_next(struct binheap_handle *handle)
{
	struct binheap_node *temp = handle->next;

	/* find a "bend" in the tree. */
	while(temp->parent && (temp == temp->parent->right)) {
		temp = temp->parent;
	}

	/* step over to sibling if we're not at root */
	if(temp->parent != NULL) {
		temp = temp->parent->right;
	}

	/* now travel left as far as possible. */
	while(temp->left != NULL) {
		temp = temp->left;
	}

	handle->next = temp;
}
322 | |||
323 | |||
324 | |||
/* bubble node up towards root: swap data with the parent while this node
 * orders before it. A NULL data pointer is treated as highest possible
 * priority so a node marked for deletion rises all the way to the root
 * (see __binheap_delete()). */
static inline void __binheap_bubble_up(
	struct binheap_handle *handle,
	struct binheap_node *node)
{
	/* Note: NULL data pointers are used internally for arbitrary delete */
	while((node->parent != NULL) &&
		((node->data == NULL) /* let null data bubble to the top */ ||
		handle->compare(node, node->parent))) {
		__binheap_swap(node->parent, node);
		node = node->parent;
	}
}
338 | |||
339 | |||
340 | /* bubble node down, swapping with min-child */ | ||
341 | static inline void __binheap_bubble_down(struct binheap_handle *handle) | ||
342 | { | ||
343 | struct binheap_node *node = handle->root; | ||
344 | |||
345 | while(node->left != NULL) { | ||
346 | if(node->right && handle->compare(node->right, node->left)) { | ||
347 | if(handle->compare(node->right, node)) { | ||
348 | __binheap_swap(node, node->right); | ||
349 | node = node->right; | ||
350 | } | ||
351 | else { | ||
352 | break; | ||
353 | } | ||
354 | } | ||
355 | else { | ||
356 | if(handle->compare(node->left, node)) { | ||
357 | __binheap_swap(node, node->left); | ||
358 | node = node->left; | ||
359 | } | ||
360 | else { | ||
361 | break; | ||
362 | } | ||
363 | } | ||
364 | } | ||
365 | } | ||
366 | |||
367 | |||
368 | |||
369 | static inline void __binheap_add(struct binheap_node *new_node, | ||
370 | struct binheap_handle *handle, | ||
371 | void *data) | ||
372 | { | ||
373 | /* NULL data pointers are used internally */ | ||
374 | if(!data) { | ||
375 | WARN_ON(!data); | ||
376 | return; | ||
377 | } | ||
378 | |||
379 | new_node->data = data; | ||
380 | new_node->ref = new_node; | ||
381 | new_node->ref_ptr = &(new_node->ref); | ||
382 | |||
383 | if(!binheap_empty(handle)) { | ||
384 | /* insert left side first */ | ||
385 | if(handle->next->left == NULL) { | ||
386 | handle->next->left = new_node; | ||
387 | new_node->parent = handle->next; | ||
388 | new_node->left = NULL; | ||
389 | new_node->right = NULL; | ||
390 | |||
391 | handle->last = new_node; | ||
392 | |||
393 | __binheap_bubble_up(handle, new_node); | ||
394 | } | ||
395 | else { | ||
396 | /* left occupied. insert right. */ | ||
397 | handle->next->right = new_node; | ||
398 | new_node->parent = handle->next; | ||
399 | new_node->left = NULL; | ||
400 | new_node->right = NULL; | ||
401 | |||
402 | handle->last = new_node; | ||
403 | |||
404 | __binheap_update_next(handle); | ||
405 | __binheap_bubble_up(handle, new_node); | ||
406 | } | ||
407 | } | ||
408 | else { | ||
409 | /* first node in heap */ | ||
410 | |||
411 | new_node->parent = NULL; | ||
412 | new_node->left = NULL; | ||
413 | new_node->right = NULL; | ||
414 | |||
415 | handle->root = new_node; | ||
416 | handle->next = new_node; | ||
417 | handle->last = new_node; | ||
418 | } | ||
419 | } | ||
420 | |||
421 | |||
422 | |||
/**
 * Removes the root node from the heap. The node is removed after coalescing
 * the binheap_node with its original data pointer at the root of the tree.
 *
 * The 'last' node in the tree is then swapped up to the root and bubbled
 * down.
 *
 * @container: the node originally inserted with the root's data pointer;
 *             it is this node (not whatever node happens to sit at the
 *             root) that is physically unlinked, so the caller may free it.
 */
static inline void __binheap_delete_root(struct binheap_handle *handle,
	struct binheap_node *container)
{
	struct binheap_node *root = handle->root;

	if(root != container) {
		/* coalesce: physically move 'container' to the root position */
		__binheap_swap_safe(handle, root, container);
		root = container;
	}

	if(handle->last != root) {
		/* swap 'last' node up to root and bubble it down. */

		struct binheap_node *to_move = handle->last;

		if(to_move->parent != root) {
			/* to_move's parent regains a free slot */
			handle->next = to_move->parent;

			if(handle->next->right == to_move) {
				/* disconnect from parent */
				to_move->parent->right = NULL;
				/* left sibling becomes the new last node */
				handle->last = handle->next->left;
			}
			else {
				/* find new 'last' before we disconnect */
				__binheap_update_last(handle);

				/* disconnect from parent */
				to_move->parent->left = NULL;
			}
		}
		else {
			/* 'last' is direct child of root */

			handle->next = to_move;

			if(to_move == to_move->parent->right) {
				to_move->parent->right = NULL;
				handle->last = to_move->parent->left;
			}
			else {
				to_move->parent->left = NULL;
				handle->last = to_move;
			}
		}
		to_move->parent = NULL;

		/* reconnect as root. We can't just swap data ptrs since root node
		 * may be freed after this function returns.
		 */
		to_move->left = root->left;
		to_move->right = root->right;
		if(to_move->left != NULL) {
			to_move->left->parent = to_move;
		}
		if(to_move->right != NULL) {
			to_move->right->parent = to_move;
		}

		handle->root = to_move;

		/* bubble down */
		__binheap_bubble_down(handle);
	}
	else {
		/* removing last node in tree */
		handle->root = NULL;
		handle->next = NULL;
		handle->last = NULL;
	}
}
502 | |||
503 | |||
/**
 * Delete an arbitrary node. The node's data is bubbled up to the root
 * (by giving it an artificial top priority) and then the root is deleted,
 * coalescing so that @node_to_delete itself is the node unlinked.
 */
static inline void __binheap_delete(
	struct binheap_node *node_to_delete,
	struct binheap_handle *handle)
{
	/* 'target' is the node currently holding the data that was inserted
	 * with node_to_delete (they can differ after data swaps). */
	struct binheap_node *target = node_to_delete->ref;
	void *temp_data = target->data;

	/* temporarily set data to null to allow node to bubble up to the top. */
	target->data = NULL;

	__binheap_bubble_up(handle, target);
	__binheap_delete_root(handle, node_to_delete);

	node_to_delete->data = temp_data; /* restore node data pointer */
}
523 | |||
/**
 * Bubble up a node whose priority value has decreased (i.e. it now orders
 * earlier under the heap's comparator) since it was inserted.
 * @orig_node: the node originally associated with the changed data pointer.
 */
static inline void __binheap_decrease(
	struct binheap_node *orig_node,
	struct binheap_handle *handle)
{
	struct binheap_node *target = orig_node->ref;
	__binheap_bubble_up(handle, target);
}
534 | |||
535 | #endif | ||
536 | |||
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 480c62bc895b..49653f1ea49d 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <litmus/clustered.h> | 42 | #include <litmus/clustered.h> |
43 | 43 | ||
44 | #include <litmus/bheap.h> | 44 | #include <litmus/bheap.h> |
45 | #include <litmus/binheap.h> | ||
45 | 46 | ||
46 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 47 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
47 | #include <litmus/affinity.h> | 48 | #include <litmus/affinity.h> |
@@ -70,7 +71,9 @@ typedef struct { | |||
70 | struct task_struct* linked; /* only RT tasks */ | 71 | struct task_struct* linked; /* only RT tasks */ |
71 | struct task_struct* scheduled; /* only RT tasks */ | 72 | struct task_struct* scheduled; /* only RT tasks */ |
72 | atomic_t will_schedule; /* prevent unneeded IPIs */ | 73 | atomic_t will_schedule; /* prevent unneeded IPIs */ |
73 | struct bheap_node* hn; | 74 | |
75 | int in_heap; /* == 0/1: not in / in heap*/ | ||
76 | struct binheap_node hn; | ||
74 | } cpu_entry_t; | 77 | } cpu_entry_t; |
75 | 78 | ||
76 | /* one cpu_entry_t per CPU */ | 79 | /* one cpu_entry_t per CPU */ |
@@ -96,8 +99,7 @@ typedef struct clusterdomain { | |||
96 | /* map of this cluster cpus */ | 99 | /* map of this cluster cpus */ |
97 | cpumask_var_t cpu_map; | 100 | cpumask_var_t cpu_map; |
98 | /* the cpus queue themselves according to priority in here */ | 101 | /* the cpus queue themselves according to priority in here */ |
99 | struct bheap_node *heap_node; | 102 | struct binheap_handle cpu_heap; |
100 | struct bheap cpu_heap; | ||
101 | /* lock for this cluster */ | 103 | /* lock for this cluster */ |
102 | #define cluster_lock domain.ready_lock | 104 | #define cluster_lock domain.ready_lock |
103 | } cedf_domain_t; | 105 | } cedf_domain_t; |
@@ -115,11 +117,11 @@ cedf_domain_t *cedf; | |||
115 | */ | 117 | */ |
116 | #define VERBOSE_INIT | 118 | #define VERBOSE_INIT |
117 | 119 | ||
118 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | 120 | static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) |
119 | { | 121 | { |
120 | cpu_entry_t *a, *b; | 122 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); |
121 | a = _a->value; | 123 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); |
122 | b = _b->value; | 124 | |
123 | /* Note that a and b are inverted: we want the lowest-priority CPU at | 125 | /* Note that a and b are inverted: we want the lowest-priority CPU at |
124 | * the top of the heap. | 126 | * the top of the heap. |
125 | */ | 127 | */ |
@@ -133,20 +135,16 @@ static void update_cpu_position(cpu_entry_t *entry) | |||
133 | { | 135 | { |
134 | cedf_domain_t *cluster = entry->cluster; | 136 | cedf_domain_t *cluster = entry->cluster; |
135 | 137 | ||
136 | if (likely(bheap_node_in_heap(entry->hn))) | 138 | if (likely(entry->in_heap)) |
137 | bheap_delete(cpu_lower_prio, | 139 | binheap_delete(&entry->hn, &cluster->cpu_heap); |
138 | &cluster->cpu_heap, | 140 | binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn); |
139 | entry->hn); | 141 | entry->in_heap = 1; |
140 | |||
141 | bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn); | ||
142 | } | 142 | } |
143 | 143 | ||
144 | /* caller must hold cedf lock */ | 144 | /* caller must hold cedf lock */ |
145 | static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster) | 145 | static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster) |
146 | { | 146 | { |
147 | struct bheap_node* hn; | 147 | return binheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn); |
148 | hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap); | ||
149 | return hn->value; | ||
150 | } | 148 | } |
151 | 149 | ||
152 | 150 | ||
@@ -689,7 +687,6 @@ static void cleanup_cedf(void) | |||
689 | if (clusters_allocated) { | 687 | if (clusters_allocated) { |
690 | for (i = 0; i < num_clusters; i++) { | 688 | for (i = 0; i < num_clusters; i++) { |
691 | kfree(cedf[i].cpus); | 689 | kfree(cedf[i].cpus); |
692 | kfree(cedf[i].heap_node); | ||
693 | free_cpumask_var(cedf[i].cpu_map); | 690 | free_cpumask_var(cedf[i].cpu_map); |
694 | } | 691 | } |
695 | 692 | ||
@@ -749,10 +746,7 @@ static long cedf_activate_plugin(void) | |||
749 | 746 | ||
750 | cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t), | 747 | cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t), |
751 | GFP_ATOMIC); | 748 | GFP_ATOMIC); |
752 | cedf[i].heap_node = kmalloc( | 749 | INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio); |
753 | cluster_size * sizeof(struct bheap_node), | ||
754 | GFP_ATOMIC); | ||
755 | bheap_init(&(cedf[i].cpu_heap)); | ||
756 | edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); | 750 | edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); |
757 | 751 | ||
758 | if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) | 752 | if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) |
@@ -795,8 +789,9 @@ static long cedf_activate_plugin(void) | |||
795 | atomic_set(&entry->will_schedule, 0); | 789 | atomic_set(&entry->will_schedule, 0); |
796 | entry->cpu = ccpu; | 790 | entry->cpu = ccpu; |
797 | entry->cluster = &cedf[i]; | 791 | entry->cluster = &cedf[i]; |
798 | entry->hn = &(cedf[i].heap_node[cpu_count]); | 792 | |
799 | bheap_node_init(&entry->hn, entry); | 793 | INIT_BINHEAP_NODE(&entry->hn); |
794 | entry->in_heap = 0; | ||
800 | 795 | ||
801 | cpu_count++; | 796 | cpu_count++; |
802 | 797 | ||
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 91c38bbf695c..6edcc339ea5e 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <litmus/preempt.h> | 23 | #include <litmus/preempt.h> |
24 | 24 | ||
25 | #include <litmus/bheap.h> | 25 | #include <litmus/bheap.h> |
26 | #include <litmus/binheap.h> | ||
26 | 27 | ||
27 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 28 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
28 | #include <litmus/affinity.h> | 29 | #include <litmus/affinity.h> |
@@ -103,15 +104,15 @@ typedef struct { | |||
103 | int cpu; | 104 | int cpu; |
104 | struct task_struct* linked; /* only RT tasks */ | 105 | struct task_struct* linked; /* only RT tasks */ |
105 | struct task_struct* scheduled; /* only RT tasks */ | 106 | struct task_struct* scheduled; /* only RT tasks */ |
106 | struct bheap_node* hn; | 107 | int in_heap; /* == 0/1: not in / in heap*/ |
108 | struct binheap_node hn; | ||
107 | } cpu_entry_t; | 109 | } cpu_entry_t; |
108 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | 110 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); |
109 | 111 | ||
110 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; | 112 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; |
111 | 113 | ||
112 | /* the cpus queue themselves according to priority in here */ | 114 | /* the cpus queue themselves according to priority in here */ |
113 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; | 115 | static struct binheap_handle gsnedf_cpu_heap; |
114 | static struct bheap gsnedf_cpu_heap; | ||
115 | 116 | ||
116 | static rt_domain_t gsnedf; | 117 | static rt_domain_t gsnedf; |
117 | #define gsnedf_lock (gsnedf.ready_lock) | 118 | #define gsnedf_lock (gsnedf.ready_lock) |
@@ -122,33 +123,33 @@ static rt_domain_t gsnedf; | |||
122 | #define WANT_ALL_SCHED_EVENTS | 123 | #define WANT_ALL_SCHED_EVENTS |
123 | */ | 124 | */ |
124 | 125 | ||
125 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | 126 | static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) |
126 | { | 127 | { |
127 | cpu_entry_t *a, *b; | 128 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); |
128 | a = _a->value; | 129 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); |
129 | b = _b->value; | 130 | |
130 | /* Note that a and b are inverted: we want the lowest-priority CPU at | 131 | /* Note that a and b are inverted: we want the lowest-priority CPU at |
131 | * the top of the heap. | 132 | * the top of the heap. |
132 | */ | 133 | */ |
133 | return edf_higher_prio(b->linked, a->linked); | 134 | return edf_higher_prio(b->linked, a->linked); |
134 | } | 135 | } |
135 | 136 | ||
137 | |||
136 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | 138 | /* update_cpu_position - Move the cpu entry to the correct place to maintain |
137 | * order in the cpu queue. Caller must hold gsnedf lock. | 139 | * order in the cpu queue. Caller must hold gsnedf lock. |
138 | */ | 140 | */ |
139 | static void update_cpu_position(cpu_entry_t *entry) | 141 | static void update_cpu_position(cpu_entry_t *entry) |
140 | { | 142 | { |
141 | if (likely(bheap_node_in_heap(entry->hn))) | 143 | if (likely(entry->in_heap)) |
142 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | 144 | binheap_delete(&entry->hn, &gsnedf_cpu_heap); |
143 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | 145 | binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn); |
146 | entry->in_heap = 1; | ||
144 | } | 147 | } |
145 | 148 | ||
146 | /* caller must hold gsnedf lock */ | 149 | /* caller must hold gsnedf lock */ |
147 | static cpu_entry_t* lowest_prio_cpu(void) | 150 | static cpu_entry_t* lowest_prio_cpu(void) |
148 | { | 151 | { |
149 | struct bheap_node* hn; | 152 | return binheap_top_entry(&gsnedf_cpu_heap, cpu_entry_t, hn); |
150 | hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
151 | return hn->value; | ||
152 | } | 153 | } |
153 | 154 | ||
154 | 155 | ||
@@ -665,10 +666,9 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
665 | * We can't use heap_decrease() here since | 666 | * We can't use heap_decrease() here since |
666 | * the cpu_heap is ordered in reverse direction, so | 667 | * the cpu_heap is ordered in reverse direction, so |
667 | * it is actually an increase. */ | 668 | * it is actually an increase. */ |
668 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 669 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); |
669 | gsnedf_cpus[linked_on]->hn); | 670 | binheap_add(&gsnedf_cpus[linked_on]->hn, |
670 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 671 | &gsnedf_cpu_heap, cpu_entry_t, hn); |
671 | gsnedf_cpus[linked_on]->hn); | ||
672 | } else { | 672 | } else { |
673 | /* holder may be queued: first stop queue changes */ | 673 | /* holder may be queued: first stop queue changes */ |
674 | raw_spin_lock(&gsnedf.release_lock); | 674 | raw_spin_lock(&gsnedf.release_lock); |
@@ -1329,14 +1329,15 @@ static long gsnedf_activate_plugin(void) | |||
1329 | int cpu; | 1329 | int cpu; |
1330 | cpu_entry_t *entry; | 1330 | cpu_entry_t *entry; |
1331 | 1331 | ||
1332 | bheap_init(&gsnedf_cpu_heap); | 1332 | INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); |
1333 | #ifdef CONFIG_RELEASE_MASTER | 1333 | #ifdef CONFIG_RELEASE_MASTER |
1334 | gsnedf.release_master = atomic_read(&release_master_cpu); | 1334 | gsnedf.release_master = atomic_read(&release_master_cpu); |
1335 | #endif | 1335 | #endif |
1336 | 1336 | ||
1337 | for_each_online_cpu(cpu) { | 1337 | for_each_online_cpu(cpu) { |
1338 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 1338 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
1339 | bheap_node_init(&entry->hn, entry); | 1339 | INIT_BINHEAP_NODE(&entry->hn); |
1340 | entry->in_heap = 0; | ||
1340 | entry->linked = NULL; | 1341 | entry->linked = NULL; |
1341 | entry->scheduled = NULL; | 1342 | entry->scheduled = NULL; |
1342 | #ifdef CONFIG_RELEASE_MASTER | 1343 | #ifdef CONFIG_RELEASE_MASTER |
@@ -1377,14 +1378,15 @@ static int __init init_gsn_edf(void) | |||
1377 | int cpu; | 1378 | int cpu; |
1378 | cpu_entry_t *entry; | 1379 | cpu_entry_t *entry; |
1379 | 1380 | ||
1380 | bheap_init(&gsnedf_cpu_heap); | 1381 | INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); |
1381 | /* initialize CPU state */ | 1382 | /* initialize CPU state */ |
1382 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1383 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
1383 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 1384 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
1384 | gsnedf_cpus[cpu] = entry; | 1385 | gsnedf_cpus[cpu] = entry; |
1385 | entry->cpu = cpu; | 1386 | entry->cpu = cpu; |
1386 | entry->hn = &gsnedf_heap_node[cpu]; | 1387 | |
1387 | bheap_node_init(&entry->hn, entry); | 1388 | INIT_BINHEAP_NODE(&entry->hn); |
1389 | entry->in_heap = 0; | ||
1388 | } | 1390 | } |
1389 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 1391 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); |
1390 | return register_sched_plugin(&gsn_edf_plugin); | 1392 | return register_sched_plugin(&gsn_edf_plugin); |