 include/litmus/bheap.h                      |  77 ++++++++++
 include/litmus/edf_common.h                 |   2 +-
 include/litmus/heap.h                       |  77 ----------
 include/litmus/litmus.h                     |   2 +-
 include/litmus/rt_domain.h                  |  36 ++--
 include/litmus/rt_param.h                   |   4 +-
 litmus/Makefile                             |   2 +-
 litmus/bheap.c (renamed from litmus/heap.c) | 128 +++++------
 litmus/edf_common.c                         |   4 +-
 litmus/litmus.c                             |  26 ++--
 litmus/rt_domain.c                          |  22 +--
 litmus/sched_gsn_edf.c                      |  38 ++--
 12 files changed, 209 insertions(+), 209 deletions(-)
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
new file mode 100644
index 000000000000..cf4864a498d8
--- /dev/null
+++ b/include/litmus/bheap.h
@@ -0,0 +1,77 @@
+/* bheaps.h -- Binomial Heaps
+ *
+ * (c) 2008, 2009 Bjoern Brandenburg
+ */
+
+#ifndef BHEAP_H
+#define BHEAP_H
+
+#define NOT_IN_HEAP UINT_MAX
+
+struct bheap_node {
+	struct bheap_node*	parent;
+	struct bheap_node*	next;
+	struct bheap_node*	child;
+
+	unsigned int		degree;
+	void*			value;
+	struct bheap_node**	ref;
+};
+
+struct bheap {
+	struct bheap_node*	head;
+	/* We cache the minimum of the heap.
+	 * This speeds up repeated peek operations.
+	 */
+	struct bheap_node*	min;
+};
+
+typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b);
+
+void bheap_init(struct bheap* heap);
+void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value);
+
+static inline int bheap_node_in_heap(struct bheap_node* h)
+{
+	return h->degree != NOT_IN_HEAP;
+}
+
+static inline int bheap_empty(struct bheap* heap)
+{
+	return heap->head == NULL && heap->min == NULL;
+}
+
+/* insert (and reinitialize) a node into the heap */
+void bheap_insert(bheap_prio_t higher_prio,
+		  struct bheap* heap,
+		  struct bheap_node* node);
+
+/* merge addition into target */
+void bheap_union(bheap_prio_t higher_prio,
+		 struct bheap* target,
+		 struct bheap* addition);
+
+struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
+			      struct bheap* heap);
+
+struct bheap_node* bheap_take(bheap_prio_t higher_prio,
+			      struct bheap* heap);
+
+void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap);
+int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node);
+
+void bheap_delete(bheap_prio_t higher_prio,
+		  struct bheap* heap,
+		  struct bheap_node* node);
+
+/* allocate from memcache */
+struct bheap_node* bheap_node_alloc(int gfp_flags);
+void bheap_node_free(struct bheap_node* hn);
+
+/* allocate a heap node for value and insert into the heap */
+int bheap_add(bheap_prio_t higher_prio, struct bheap* heap,
+	      void* value, int gfp_flags);
+
+void* bheap_take_del(bheap_prio_t higher_prio,
+		     struct bheap* heap);
+#endif
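
To make the renamed API concrete, here is a minimal userspace sketch (not part of the patch). It assumes bheap.c is compiled with its kernel-only bits replaced: <limits.h> supplying UINT_MAX instead of linux/kernel.h, a no-op likely(), and malloc() standing in for the kmem_cache-backed allocators. The hypothetical struct job payload is ordered by deadline, EDF-style.

#include <limits.h>	/* UINT_MAX, normally via linux/kernel.h */
#include <stdio.h>
#include <stdlib.h>
#include "litmus/bheap.h"

/* userspace stand-ins for the kmem_cache-backed allocators */
struct bheap_node* bheap_node_alloc(int gfp_flags)
{
	(void) gfp_flags;
	return malloc(sizeof(struct bheap_node));
}

void bheap_node_free(struct bheap_node* hn)
{
	free(hn);
}

struct job { unsigned long deadline; };	/* hypothetical payload */

/* bheap_prio_t callback: earlier deadline = higher priority */
static int edf_order(struct bheap_node* a, struct bheap_node* b)
{
	return ((struct job*) a->value)->deadline <
	       ((struct job*) b->value)->deadline;
}

int main(void)
{
	struct bheap heap;
	struct job jobs[] = { {30}, {10}, {20} };
	int i;

	bheap_init(&heap);
	for (i = 0; i < 3; i++)
		bheap_add(edf_order, &heap, &jobs[i], 0);

	/* drains in deadline order: 10, 20, 30 */
	while (!bheap_empty(&heap))
		printf("%lu\n", ((struct job*)
			bheap_take_del(edf_order, &heap))->deadline);
	return 0;
}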
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index 166cdac6bcab..80d4321cc87e 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -18,7 +18,7 @@ void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 int edf_higher_prio(struct task_struct* first,
 		    struct task_struct* second);
 
-int edf_ready_order(struct heap_node* a, struct heap_node* b);
+int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
 
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
diff --git a/include/litmus/heap.h b/include/litmus/heap.h
deleted file mode 100644
index da959b0bec9c..000000000000
--- a/include/litmus/heap.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* heaps.h -- Binomial Heaps
- *
- * (c) 2008, 2009 Bjoern Brandenburg
- */
-
-#ifndef HEAP_H
-#define HEAP_H
-
-#define NOT_IN_HEAP UINT_MAX
-
-struct heap_node {
-	struct heap_node*	parent;
-	struct heap_node*	next;
-	struct heap_node*	child;
-
-	unsigned int		degree;
-	void*			value;
-	struct heap_node**	ref;
-};
-
-struct heap {
-	struct heap_node*	head;
-	/* We cache the minimum of the heap.
-	 * This speeds up repeated peek operations.
-	 */
-	struct heap_node*	min;
-};
-
-typedef int (*heap_prio_t)(struct heap_node* a, struct heap_node* b);
-
-void heap_init(struct heap* heap);
-void heap_node_init(struct heap_node** ref_to_heap_node_ptr, void* value);
-
-static inline int heap_node_in_heap(struct heap_node* h)
-{
-	return h->degree != NOT_IN_HEAP;
-}
-
-static inline int heap_empty(struct heap* heap)
-{
-	return heap->head == NULL && heap->min == NULL;
-}
-
-/* insert (and reinitialize) a node into the heap */
-void heap_insert(heap_prio_t higher_prio,
-		 struct heap* heap,
-		 struct heap_node* node);
-
-/* merge addition into target */
-void heap_union(heap_prio_t higher_prio,
-		struct heap* target,
-		struct heap* addition);
-
-struct heap_node* heap_peek(heap_prio_t higher_prio,
-			    struct heap* heap);
-
-struct heap_node* heap_take(heap_prio_t higher_prio,
-			    struct heap* heap);
-
-void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap);
-int heap_decrease(heap_prio_t higher_prio, struct heap_node* node);
-
-void heap_delete(heap_prio_t higher_prio,
-		 struct heap* heap,
-		 struct heap_node* node);
-
-/* allocate from memcache */
-struct heap_node* heap_node_alloc(int gfp_flags);
-void heap_node_free(struct heap_node* hn);
-
-/* allocate a heap node for value and insert into the heap */
-int heap_add(heap_prio_t higher_prio, struct heap* heap,
-	     void* value, int gfp_flags);
-
-void* heap_take_del(heap_prio_t higher_prio,
-		    struct heap* heap);
-#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 380fcb8acb33..a03580bc707c 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -133,7 +133,7 @@ void srp_ceiling_block(void);
 #define srp_ceiling_block() /* nothing */
 #endif
 
-#define heap2task(hn) ((struct task_struct*) hn->value)
+#define bheap2task(hn) ((struct task_struct*) hn->value)
 
 static inline int is_np(struct task_struct *t)
 {
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index c7c55bef3e42..c780fdfcccae 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -5,14 +5,14 @@
 #ifndef __UNC_RT_DOMAIN_H__
 #define __UNC_RT_DOMAIN_H__
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 #define RELEASE_QUEUE_SLOTS 127 /* prime */
 
 struct _rt_domain;
 
 typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
-typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks);
+typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
 
 struct release_queue {
 	/* each slot maintains a list of release heaps sorted
@@ -23,7 +23,7 @@ struct release_queue {
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
 	spinlock_t		ready_lock;
-	struct heap		ready_queue;
+	struct bheap		ready_queue;
 
 	/* real-time tasks waiting for release are in here */
 	spinlock_t		release_lock;
@@ -41,7 +41,7 @@ typedef struct _rt_domain {
 	release_jobs_t		release_jobs;
 
 	/* how are tasks ordered in the ready queue? */
-	heap_prio_t		order;
+	bheap_prio_t		order;
 } rt_domain_t;
 
 struct release_heap {
@@ -49,7 +49,7 @@ struct release_heap {
 	struct list_head	list;
 	lt_t			release_time;
 	/* all tasks to be released at release_time */
-	struct heap		heap;
+	struct bheap		heap;
 	/* used to trigger the release */
 	struct hrtimer		timer;
 	/* used to delegate releases */
@@ -61,47 +61,47 @@ struct release_heap {
 
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
-	struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
+	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
-void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
+void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
 		    check_resched_needed_t check,
 		    release_jobs_t relase);
 
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
-void __merge_ready(rt_domain_t* rt, struct heap *tasks);
+void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
 
 static inline struct task_struct* __take_ready(rt_domain_t* rt)
 {
-	struct heap_node* hn = heap_take(rt->order, &rt->ready_queue);
+	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
 static inline struct task_struct* __peek_ready(rt_domain_t* rt)
 {
-	struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue);
+	struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
 static inline int is_queued(struct task_struct *t)
 {
-	return heap_node_in_heap(tsk_rt(t)->heap_node);
+	return bheap_node_in_heap(tsk_rt(t)->heap_node);
 }
 
 static inline void remove(rt_domain_t* rt, struct task_struct *t)
 {
-	heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
+	bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
 }
 
 static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
@@ -113,7 +113,7 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
-static inline void merge_ready(rt_domain_t* rt, struct heap* tasks)
+static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&rt->ready_lock, flags);
@@ -144,7 +144,7 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 
 static inline int __jobs_pending(rt_domain_t* rt)
 {
-	return !heap_empty(&rt->ready_queue);
+	return !bheap_empty(&rt->ready_queue);
 }
 
 static inline int jobs_pending(rt_domain_t* rt)
@@ -153,7 +153,7 @@ static inline int jobs_pending(rt_domain_t* rt)
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
 	spin_lock_irqsave(&rt->ready_lock, flags);
-	ret = !heap_empty(&rt->ready_queue);
+	ret = !bheap_empty(&rt->ready_queue);
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
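
The inline helpers above follow a locking convention worth spelling out: the double-underscore forms (__peek_ready(), __take_ready(), __jobs_pending()) assume the caller already holds rt->ready_lock, while the plain wrappers (add_ready(), merge_ready(), jobs_pending()) acquire it themselves. A hypothetical plugin helper (not part of the patch) that combines a test and a dequeue under one critical section would therefore use the raw forms:

static struct task_struct* take_if_pending(rt_domain_t* rt)
{
	unsigned long flags;
	struct task_struct* t = NULL;

	spin_lock_irqsave(&rt->ready_lock, flags);
	if (__jobs_pending(rt))	/* lock held: raw helpers are safe */
		t = __take_ready(rt);
	spin_unlock_irqrestore(&rt->ready_lock, flags);
	return t;
}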
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index c599f848d1ed..e20427846273 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -39,7 +39,7 @@ struct rt_task {
 #ifdef __KERNEL__
 
 struct _rt_domain;
-struct heap_node;
+struct bheap_node;
 struct release_heap;
 
 struct rt_job {
@@ -157,7 +157,7 @@ struct rt_param {
 	 * other than this pointer (which is updated by the heap
 	 * implementation).
 	 */
-	struct heap_node*	heap_node;
+	struct bheap_node*	heap_node;
 	struct release_heap*	rel_heap;
 
 	/* Used by rt_domain to queue task in release list.
diff --git a/litmus/Makefile b/litmus/Makefile
index b15e65ee7f37..70c9684c3b98 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -10,7 +10,7 @@ obj-y = sched_plugin.o litmus.o \
 	   fdso.o \
 	   srp.o \
 	   fmlp.o \
-	   heap.o \
+	   bheap.o \
 	   sched_gsn_edf.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
diff --git a/litmus/heap.c b/litmus/bheap.c
index 112d14da46c3..528af97f18a6 100644
--- a/litmus/heap.c
+++ b/litmus/bheap.c
@@ -1,15 +1,15 @@
1#include "linux/kernel.h" 1#include "linux/kernel.h"
2#include "litmus/heap.h" 2#include "litmus/bheap.h"
3 3
4void heap_init(struct heap* heap) 4void bheap_init(struct bheap* heap)
5{ 5{
6 heap->head = NULL; 6 heap->head = NULL;
7 heap->min = NULL; 7 heap->min = NULL;
8} 8}
9 9
10void heap_node_init(struct heap_node** _h, void* value) 10void bheap_node_init(struct bheap_node** _h, void* value)
11{ 11{
12 struct heap_node* h = *_h; 12 struct bheap_node* h = *_h;
13 h->parent = NULL; 13 h->parent = NULL;
14 h->next = NULL; 14 h->next = NULL;
15 h->child = NULL; 15 h->child = NULL;
@@ -20,8 +20,8 @@ void heap_node_init(struct heap_node** _h, void* value)
 
 
 /* make child a subtree of root */
-static void __heap_link(struct heap_node* root,
-			struct heap_node* child)
+static void __bheap_link(struct bheap_node* root,
+			 struct bheap_node* child)
 {
 	child->parent = root;
 	child->next   = root->child;
@@ -30,11 +30,11 @@ static void __heap_link(struct heap_node* root,
 }
 
 /* merge root lists */
-static struct heap_node* __heap_merge(struct heap_node* a,
-				      struct heap_node* b)
+static struct bheap_node* __bheap_merge(struct bheap_node* a,
+					struct bheap_node* b)
 {
-	struct heap_node* head = NULL;
-	struct heap_node** pos = &head;
+	struct bheap_node* head = NULL;
+	struct bheap_node** pos = &head;
 
 	while (a && b) {
 		if (a->degree < b->degree) {
@@ -54,10 +54,10 @@ static struct heap_node* __heap_merge(struct heap_node* a,
 }
 
 /* reverse a linked list of nodes. also clears parent pointer */
-static struct heap_node* __heap_reverse(struct heap_node* h)
+static struct bheap_node* __bheap_reverse(struct bheap_node* h)
 {
-	struct heap_node* tail = NULL;
-	struct heap_node* next;
+	struct bheap_node* tail = NULL;
+	struct bheap_node* next;
 
 	if (!h)
 		return h;
@@ -74,10 +74,10 @@ static struct heap_node* __heap_reverse(struct heap_node* h)
 	return h;
 }
 
-static void __heap_min(heap_prio_t higher_prio, struct heap* heap,
-		       struct heap_node** prev, struct heap_node** node)
+static void __bheap_min(bheap_prio_t higher_prio, struct bheap* heap,
+			struct bheap_node** prev, struct bheap_node** node)
 {
-	struct heap_node *_prev, *cur;
+	struct bheap_node *_prev, *cur;
 	*prev = NULL;
 
 	if (!heap->head) {
@@ -98,11 +98,11 @@ static void __heap_min(heap_prio_t higher_prio, struct heap* heap,
 	}
 }
 
-static void __heap_union(heap_prio_t higher_prio, struct heap* heap,
-			 struct heap_node* h2)
+static void __bheap_union(bheap_prio_t higher_prio, struct bheap* heap,
+			  struct bheap_node* h2)
 {
-	struct heap_node* h1;
-	struct heap_node *prev, *x, *next;
+	struct bheap_node* h1;
+	struct bheap_node *prev, *x, *next;
 	if (!h2)
 		return;
 	h1 = heap->head;
@@ -110,7 +110,7 @@ static void __heap_union(heap_prio_t higher_prio, struct heap* heap,
 		heap->head = h2;
 		return;
 	}
-	h1 = __heap_merge(h1, h2);
+	h1 = __bheap_merge(h1, h2);
 	prev = NULL;
 	x = h1;
 	next = x->next;
@@ -123,14 +123,14 @@ static void __heap_union(heap_prio_t higher_prio, struct heap* heap,
 		} else if (higher_prio(x, next)) {
 			/* x becomes the root of next */
 			x->next = next->next;
-			__heap_link(x, next);
+			__bheap_link(x, next);
 		} else {
 			/* next becomes the root of x */
 			if (prev)
 				prev->next = next;
 			else
 				h1 = next;
-			__heap_link(next, x);
+			__bheap_link(next, x);
 			x = next;
 		}
 		next = x->next;
@@ -138,26 +138,26 @@ static void __heap_union(heap_prio_t higher_prio, struct heap* heap,
 	heap->head = h1;
 }
 
-static struct heap_node* __heap_extract_min(heap_prio_t higher_prio,
-					    struct heap* heap)
+static struct bheap_node* __bheap_extract_min(bheap_prio_t higher_prio,
+					      struct bheap* heap)
 {
-	struct heap_node *prev, *node;
-	__heap_min(higher_prio, heap, &prev, &node);
+	struct bheap_node *prev, *node;
+	__bheap_min(higher_prio, heap, &prev, &node);
 	if (!node)
 		return NULL;
 	if (prev)
 		prev->next = node->next;
 	else
 		heap->head = node->next;
-	__heap_union(higher_prio, heap, __heap_reverse(node->child));
+	__bheap_union(higher_prio, heap, __bheap_reverse(node->child));
 	return node;
 }
 
 /* insert (and reinitialize) a node into the heap */
-void heap_insert(heap_prio_t higher_prio, struct heap* heap,
-		 struct heap_node* node)
+void bheap_insert(bheap_prio_t higher_prio, struct bheap* heap,
+		  struct bheap_node* node)
 {
-	struct heap_node *min;
+	struct bheap_node *min;
 	node->child  = NULL;
 	node->parent = NULL;
 	node->next   = NULL;
@@ -169,48 +169,48 @@ void heap_insert(heap_prio_t higher_prio, struct heap* heap,
 		min->parent = NULL;
 		min->next   = NULL;
 		min->degree = 0;
-		__heap_union(higher_prio, heap, min);
+		__bheap_union(higher_prio, heap, min);
 		heap->min   = node;
 	} else
-		__heap_union(higher_prio, heap, node);
+		__bheap_union(higher_prio, heap, node);
 }
 
-void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap)
+void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap)
 {
-	struct heap_node* min;
+	struct bheap_node* min;
 	if (heap->min) {
 		min = heap->min;
 		heap->min = NULL;
-		heap_insert(higher_prio, heap, min);
+		bheap_insert(higher_prio, heap, min);
 	}
 }
 
 /* merge addition into target */
-void heap_union(heap_prio_t higher_prio,
-		struct heap* target, struct heap* addition)
+void bheap_union(bheap_prio_t higher_prio,
+		 struct bheap* target, struct bheap* addition)
 {
 	/* first insert any cached minima, if necessary */
-	heap_uncache_min(higher_prio, target);
-	heap_uncache_min(higher_prio, addition);
-	__heap_union(higher_prio, target, addition->head);
+	bheap_uncache_min(higher_prio, target);
+	bheap_uncache_min(higher_prio, addition);
+	__bheap_union(higher_prio, target, addition->head);
 	/* this is a destructive merge */
 	addition->head = NULL;
 }
 
-struct heap_node* heap_peek(heap_prio_t higher_prio,
-			    struct heap* heap)
+struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
+			      struct bheap* heap)
 {
 	if (!heap->min)
-		heap->min = __heap_extract_min(higher_prio, heap);
+		heap->min = __bheap_extract_min(higher_prio, heap);
 	return heap->min;
 }
 
-struct heap_node* heap_take(heap_prio_t higher_prio,
-			    struct heap* heap)
+struct bheap_node* bheap_take(bheap_prio_t higher_prio,
+			      struct bheap* heap)
 {
-	struct heap_node *node;
+	struct bheap_node *node;
 	if (!heap->min)
-		heap->min = __heap_extract_min(higher_prio, heap);
+		heap->min = __bheap_extract_min(higher_prio, heap);
 	node = heap->min;
 	heap->min = NULL;
 	if (node)
@@ -218,10 +218,10 @@ struct heap_node* heap_take(heap_prio_t higher_prio,
 	return node;
 }
 
-int heap_decrease(heap_prio_t higher_prio, struct heap_node* node)
+int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node)
 {
-	struct heap_node *parent;
-	struct heap_node** tmp_ref;
+	struct bheap_node *parent;
+	struct bheap_node** tmp_ref;
 	void* tmp;
 
 	/* bubble up */
@@ -245,11 +245,11 @@ int heap_decrease(heap_prio_t higher_prio, struct heap_node* node)
 	return parent != NULL;
 }
 
-void heap_delete(heap_prio_t higher_prio, struct heap* heap,
-		 struct heap_node* node)
+void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap,
+		  struct bheap_node* node)
 {
-	struct heap_node *parent, *prev, *pos;
-	struct heap_node** tmp_ref;
+	struct bheap_node *parent, *prev, *pos;
+	struct bheap_node** tmp_ref;
 	void* tmp;
 
 	if (heap->min != node) {
@@ -283,32 +283,32 @@ void heap_delete(heap_prio_t higher_prio, struct heap* heap,
 			prev->next = node->next;
 		else
 			heap->head = node->next;
-		__heap_union(higher_prio, heap, __heap_reverse(node->child));
+		__bheap_union(higher_prio, heap, __bheap_reverse(node->child));
 	} else
 		heap->min = NULL;
 	node->degree = NOT_IN_HEAP;
 }
 
 /* allocate a heap node for value and insert into the heap */
-int heap_add(heap_prio_t higher_prio, struct heap* heap,
-	     void* value, int gfp_flags)
+int bheap_add(bheap_prio_t higher_prio, struct bheap* heap,
+	      void* value, int gfp_flags)
 {
-	struct heap_node* hn = heap_node_alloc(gfp_flags);
+	struct bheap_node* hn = bheap_node_alloc(gfp_flags);
 	if (likely(hn)) {
-		heap_node_init(&hn, value);
-		heap_insert(higher_prio, heap, hn);
+		bheap_node_init(&hn, value);
+		bheap_insert(higher_prio, heap, hn);
 	}
 	return hn != NULL;
 }
 
-void* heap_take_del(heap_prio_t higher_prio,
-		    struct heap* heap)
+void* bheap_take_del(bheap_prio_t higher_prio,
+		     struct bheap* heap)
 {
-	struct heap_node* hn = heap_take(higher_prio, heap);
+	struct bheap_node* hn = bheap_take(higher_prio, heap);
 	void* ret = NULL;
 	if (hn) {
 		ret = hn->value;
-		heap_node_free(hn);
+		bheap_node_free(hn);
 	}
 	return ret;
 }
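
The tmp/tmp_ref swapping in bheap_decrease() and bheap_delete() is what the ref field exists for: a node bubbles up by exchanging value payloads rather than relinking subtrees, and each exchange patches the external handle (*ref) so that pointers such as tsk_rt(t)->heap_node never go stale. A hedged sketch of the resulting decrease-key pattern, reusing the hypothetical userspace harness and edf_order comparator from the bheap.h example above:

static struct bheap_node* handle;	/* mirrors tsk_rt(t)->heap_node */

static void enqueue_job(struct bheap* heap, struct job* j)
{
	handle = bheap_node_alloc(0);
	bheap_node_init(&handle, j);	/* records &handle in node->ref */
	bheap_insert(edf_order, heap, handle);
}

static void deadline_advanced(struct bheap* heap, struct job* j,
			      unsigned long earlier)
{
	j->deadline = earlier;	/* key change: priority went up */
	if (!bheap_decrease(edf_order, handle))
		/* node bubbled to a tree root: the cached minimum may
		 * now be stale, so force it back into the root list */
		bheap_uncache_min(edf_order, heap);
}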
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 97e37761cedc..06daec66c984 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -68,9 +68,9 @@ int edf_higher_prio(struct task_struct* first,
 					!second->rt_param.inh_task))));
 }
 
-int edf_ready_order(struct heap_node* a, struct heap_node* b)
+int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
 {
-	return edf_higher_prio(heap2task(a), heap2task(b));
+	return edf_higher_prio(bheap2task(a), bheap2task(b));
 }
 
 void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
diff --git a/litmus/litmus.c b/litmus/litmus.c
index de751a14d77c..2dea340aea1d 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -15,7 +15,7 @@
 #include <linux/sched.h>
 #include <litmus/sched_plugin.h>
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 #include <litmus/trace.h>
 
@@ -31,17 +31,17 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
 /* current master CPU for handling timer IRQs */
 atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 
-static struct kmem_cache * heap_node_cache;
+static struct kmem_cache * bheap_node_cache;
 extern struct kmem_cache * release_heap_cache;
 
-struct heap_node* heap_node_alloc(int gfp_flags)
+struct bheap_node* bheap_node_alloc(int gfp_flags)
 {
-	return kmem_cache_alloc(heap_node_cache, gfp_flags);
+	return kmem_cache_alloc(bheap_node_cache, gfp_flags);
 }
 
-void heap_node_free(struct heap_node* hn)
+void bheap_node_free(struct bheap_node* hn)
 {
-	kmem_cache_free(heap_node_cache, hn);
+	kmem_cache_free(bheap_node_cache, hn);
 }
 
 struct release_heap* release_heap_alloc(int gfp_flags);
@@ -323,19 +323,19 @@ long litmus_admit_task(struct task_struct* tsk)
 	spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* allocate heap node for this task */
-	tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC);
+	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
 
 	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
 		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
 
-		heap_node_free(tsk_rt(tsk)->heap_node);
+		bheap_node_free(tsk_rt(tsk)->heap_node);
 		release_heap_free(tsk_rt(tsk)->rel_heap);
 
 		retval = -ENOMEM;
 		goto out_unlock;
 	} else {
-		heap_node_init(&tsk_rt(tsk)->heap_node, tsk);
+		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
 	}
 
 	retval = litmus->admit_task(tsk);
@@ -359,8 +359,8 @@ void litmus_exit_task(struct task_struct* tsk)
 
 	litmus->task_exit(tsk);
 
-	BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node));
-	heap_node_free(tsk_rt(tsk)->heap_node);
+	BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
+	bheap_node_free(tsk_rt(tsk)->heap_node);
 	release_heap_free(tsk_rt(tsk)->rel_heap);
 
 	atomic_dec(&rt_task_count);
@@ -648,7 +648,7 @@ static int __init _init_litmus(void)
 
 	register_sched_plugin(&linux_sched_plugin);
 
-	heap_node_cache = KMEM_CACHE(heap_node, SLAB_PANIC);
+	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
 	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -667,7 +667,7 @@ static int __init _init_litmus(void)
 static void _exit_litmus(void)
 {
 	exit_litmus_proc();
-	kmem_cache_destroy(heap_node_cache);
+	kmem_cache_destroy(bheap_node_cache);
 	kmem_cache_destroy(release_heap_cache);
 }
 
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 62c9fdcd22be..0ed6d5cbbfc5 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -19,20 +19,20 @@
 
 #include <litmus/trace.h>
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 static int dummy_resched(rt_domain_t *rt)
 {
 	return 0;
 }
 
-static int dummy_order(struct heap_node* a, struct heap_node* b)
+static int dummy_order(struct bheap_node* a, struct bheap_node* b)
 {
 	return 0;
 }
 
 /* default implementation: use default lock */
-static void default_release_jobs(rt_domain_t* rt, struct heap* tasks)
+static void default_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	merge_ready(rt, tasks);
 }
@@ -157,7 +157,7 @@ static void reinit_release_heap(struct task_struct* t)
 	BUG_ON(hrtimer_cancel(&rh->timer));
 
 	/* initialize */
-	heap_init(&rh->heap);
+	bheap_init(&rh->heap);
 	atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
 }
 /* arm_release_timer() - start local release timer or trigger
@@ -204,7 +204,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 
 		rh = get_release_heap(rt, t, 1);
 	}
-	heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
+	bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
 	TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
 
 	spin_unlock(&rt->release_lock);
@@ -236,7 +236,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 }
 
 void rt_domain_init(rt_domain_t *rt,
-		    heap_prio_t order,
+		    bheap_prio_t order,
 		    check_resched_needed_t check,
 		    release_jobs_t release
 		    )
@@ -253,7 +253,7 @@ void rt_domain_init(rt_domain_t *rt,
 
 	rt->release_master = NO_CPU;
 
-	heap_init(&rt->ready_queue);
+	bheap_init(&rt->ready_queue);
 	INIT_LIST_HEAD(&rt->tobe_released);
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);
@@ -276,18 +276,18 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new)
 		   new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
 		   get_release(new), litmus_clock());
 
-	BUG_ON(heap_node_in_heap(tsk_rt(new)->heap_node));
+	BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node));
 
-	heap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
+	bheap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
 	rt->check_resched(rt);
 }
 
 /* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable.
  * @tasks - the newly released tasks
  */
-void __merge_ready(rt_domain_t* rt, struct heap* tasks)
+void __merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
-	heap_union(rt->order, &rt->ready_queue, tasks);
+	bheap_union(rt->order, &rt->ready_queue, tasks);
 	rt->check_resched(rt);
 }
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index a223e69f2efb..9f256be86cf7 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -18,7 +18,7 @@
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 #include <linux/module.h>
 
@@ -96,7 +96,7 @@ typedef struct {
 	struct task_struct*	linked;		/* only RT tasks */
 	struct task_struct*	scheduled;	/* only RT tasks */
 	atomic_t		will_schedule;	/* prevent unneeded IPIs */
-	struct heap_node*	hn;
+	struct bheap_node*	hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
@@ -111,14 +111,14 @@ cpu_entry_t* gsnedf_cpus[NR_CPUS];
 
 
 /* the cpus queue themselves according to priority in here */
-static struct heap_node gsnedf_heap_node[NR_CPUS];
-static struct heap      gsnedf_cpu_heap;
+static struct bheap_node gsnedf_heap_node[NR_CPUS];
+static struct bheap      gsnedf_cpu_heap;
 
 static rt_domain_t gsnedf;
 #define gsnedf_lock (gsnedf.ready_lock)
 
 
-static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
+static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
 	a = _a->value;
@@ -134,16 +134,16 @@ static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-	if (likely(heap_node_in_heap(entry->hn)))
-		heap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
-	heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+	if (likely(bheap_node_in_heap(entry->hn)))
+		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+	bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
 }
 
 /* caller must hold gsnedf lock */
 static cpu_entry_t* lowest_prio_cpu(void)
 {
-	struct heap_node* hn;
-	hn = heap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
+	struct bheap_node* hn;
+	hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
 	return hn->value;
 }
 
@@ -304,7 +304,7 @@ static noinline void gsnedf_job_arrival(struct task_struct* task)
 	check_for_preemptions();
 }
 
-static void gsnedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
+static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
@@ -628,9 +628,9 @@ static void update_queue_position(struct task_struct *holder)
 		 * We can't use heap_decrease() here since
 		 * the cpu_heap is ordered in reverse direction, so
 		 * it is actually an increase. */
-		heap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
+		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
 			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
-		heap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
+		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
 			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
@@ -642,7 +642,7 @@ static void update_queue_position(struct task_struct *holder)
 		 * of holder in some heap. Note that this
 		 * may be a release heap. */
 		check_preempt =
-			!heap_decrease(edf_ready_order,
+			!bheap_decrease(edf_ready_order,
 				       tsk_rt(holder)->heap_node);
 	} else {
 		/* Nothing to do: if it is not queued and not linked
@@ -664,7 +664,7 @@ static void update_queue_position(struct task_struct *holder)
 		/* heap_decrease() hit the top level of the heap: make
 		 * sure preemption checks get the right task, not the
 		 * potentially stale cache. */
-		heap_uncache_min(edf_ready_order,
+		bheap_uncache_min(edf_ready_order,
 				 &gsnedf.ready_queue);
 		check_for_preemptions();
 	}
@@ -770,12 +770,12 @@ static long gsnedf_activate_plugin(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	heap_init(&gsnedf_cpu_heap);
+	bheap_init(&gsnedf_cpu_heap);
 	gsnedf.release_master = atomic_read(&release_master_cpu);
 
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
-		heap_node_init(&entry->hn, entry);
+		bheap_node_init(&entry->hn, entry);
 		atomic_set(&entry->will_schedule, 0);
 		entry->linked    = NULL;
 		entry->scheduled = NULL;
@@ -816,7 +816,7 @@ static int __init init_gsn_edf(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	heap_init(&gsnedf_cpu_heap);
+	bheap_init(&gsnedf_cpu_heap);
 	/* initialize CPU state */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
@@ -824,7 +824,7 @@ static int __init init_gsn_edf(void)
 		atomic_set(&entry->will_schedule, 0);
 		entry->cpu	= cpu;
 		entry->hn	= &gsnedf_heap_node[cpu];
-		heap_node_init(&entry->hn, entry);
+		bheap_node_init(&entry->hn, entry);
 	}
 	edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
 	return register_sched_plugin(&gsn_edf_plugin);
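
One asymmetry in this file is worth noting: gsnedf_cpu_heap orders CPUs so that the lowest-priority CPU surfaces at the top, which is why update_queue_position() above uses a bheap_delete()/bheap_insert() pair where bheap_decrease() would move the node the wrong way. The hunk above ends before the comparator body; a plausible reconstruction (an assumption, not taken from the patch) simply inverts the argument order:

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* inverted on purpose: "a is lower priority than b" floats the
	 * lowest-priority CPU to the top, where bheap_peek() hands
	 * lowest_prio_cpu() the best preemption target */
	return edf_higher_prio(b->linked, a->linked);
}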