aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2009-04-18 16:15:42 -0400
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2009-04-18 16:15:42 -0400
commitcfca2b1dc8d5d9e195e29ae4e62df7a09ab2d579 (patch)
tree93d1793b4e9246cfadf1df5c1e62781d267d4f5b /litmus
parentc7b59d1e59d066c5b7f1b950b85b4e0314cc19b6 (diff)
litmus core: add concurrent heap impl.
Diffstat (limited to 'litmus')
-rw-r--r--litmus/Makefile1
-rw-r--r--litmus/cheap.c239
2 files changed, 240 insertions, 0 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index c2c88cc9f6..45f95d6a0c 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -6,6 +6,7 @@ obj-y = sched_plugin.o litmus.o \
6 edf_common.o jobs.o \ 6 edf_common.o jobs.o \
7 rt_domain.o fdso.o sync.o \ 7 rt_domain.o fdso.o sync.o \
8 fmlp.o srp.o norqlock.o \ 8 fmlp.o srp.o norqlock.o \
9 cheap.o \
9 sched_gsn_edf.o \ 10 sched_gsn_edf.o \
10 sched_psn_edf.o \ 11 sched_psn_edf.o \
11 sched_cedf.o \ 12 sched_cedf.o \
diff --git a/litmus/cheap.c b/litmus/cheap.c
new file mode 100644
index 0000000000..1036828192
--- /dev/null
+++ b/litmus/cheap.c
@@ -0,0 +1,239 @@
1#include "litmus/cheap.h"
2
/* Index of the parent of heap slot @child (0-based array layout). */
static unsigned int __cheap_parent(unsigned int child)
{
	return (child - 1) >> 1;
}
7
/* Index of the left child of heap slot @parent. */
static unsigned int __cheap_left_child(unsigned int parent)
{
	return (parent << 1) + 1;
}
12
/* Index of the right child of heap slot @parent. */
static unsigned int __cheap_right_child(unsigned int parent)
{
	return (parent + 1) << 1;
}
17
18static void __cheap_swap(struct cheap_node* a, struct cheap_node* b)
19{
20 unsigned int tag;
21 void* val;
22 tag = a->tag;
23 val = a->value;
24 a->tag = b->tag;
25 a->value = b->value;
26 b->tag = tag;
27 b->value = val;
28}
29
30void cheap_init(struct cheap* ch, unsigned int size,
31 struct cheap_node* nodes)
32{
33 unsigned int i;
34 spin_lock_init(&ch->lock);
35 ch->next = 0;
36 ch->size = size;
37 ch->heap = nodes;
38
39 for (i = 0; i < size; i++) {
40 spin_lock_init(&ch->heap[i].lock);
41 ch->heap[i].tag = CHEAP_EMPTY;
42 ch->heap[i].value = NULL;
43 }
44}
45
46void* cheap_peek(struct cheap* ch)
47{
48 void* val;
49 spin_lock(&ch->heap[CHEAP_ROOT].lock);
50 val = ch->heap[CHEAP_ROOT].tag != CHEAP_EMPTY ?
51 ch->heap[CHEAP_ROOT].value : NULL;
52 spin_unlock(&ch->heap[CHEAP_ROOT].lock);
53 return val;
54}
55
56int cheap_insert(cheap_prio_t higher_prio,
57 struct cheap* ch,
58 void* item,
59 int pid)
60{
61 int stop = 0;
62 unsigned int child, parent, locked;
63 unsigned int wait_for_parent_state;
64
65 spin_lock(&ch->lock);
66 if (ch->next < ch->size) {
67 /* ok, node allocated */
68 child = ch->next++;
69 spin_lock(&ch->heap[child].lock);
70 ch->heap[child].tag = pid;
71 ch->heap[child].value = item;
72 spin_unlock(&ch->lock);
73 } else {
74 /* out of space! */
75 spin_unlock(&ch->lock);
76 return -1;
77 }
78
79 spin_unlock(&ch->heap[child].lock);
80
81 /* bubble up */
82 while (!stop && child > CHEAP_ROOT) {
83 parent = __cheap_parent(child);
84 spin_lock(&ch->heap[parent].lock);
85 spin_lock(&ch->heap[child].lock);
86 locked = child;
87 wait_for_parent_state = CHEAP_EMPTY;
88 if (ch->heap[parent].tag == CHEAP_READY &&
89 ch->heap[child].tag == pid) {
90 /* no interference */
91 if (higher_prio(ch->heap[child].value,
92 ch->heap[parent].value)) {
93 /* out of order; swap and move up */
94 __cheap_swap(ch->heap + child,
95 ch->heap + parent);
96 child = parent;
97 } else {
98 /* In order; we are done. */
99 ch->heap[child].tag = CHEAP_READY;
100 stop = 1;
101 }
102 } else if (ch->heap[parent].tag == CHEAP_EMPTY) {
103 /* Concurrent extract moved child to root;
104 * we are done.
105 */
106 stop = 1;
107 } else if (ch->heap[child].tag != pid) {
108 /* Concurrent extract moved child up;
109 * we go after it.
110 */
111 child = parent;
112 } else {
113 /* Some other process needs to act first.
114 * We need to back off a little in order
115 * to give the others a chance to acquire the
116 * parent's lock.
117 */
118 wait_for_parent_state = ch->heap[parent].tag;
119 }
120
121 spin_unlock(&ch->heap[locked].lock);
122 spin_unlock(&ch->heap[parent].lock);
123
124 while (wait_for_parent_state != CHEAP_EMPTY &&
125 ((volatile unsigned int) ch->heap[parent].tag) ==
126 wait_for_parent_state)
127 cpu_relax();
128
129 }
130 if (!stop && child == CHEAP_ROOT) {
131 spin_lock(&ch->heap[child].lock);
132 if (ch->heap[child].tag == pid)
133 ch->heap[child].tag = CHEAP_READY;
134 spin_unlock(&ch->heap[child].lock);
135 }
136 return 0;
137}
138
139void* cheap_take_if(cheap_take_predicate_t pred,
140 void* pred_ctx,
141 cheap_prio_t higher_prio,
142 struct cheap* ch)
143{
144 void *val, *cval;
145 unsigned int ctag;
146 unsigned int left, right, child, parent;
147
148 spin_lock(&ch->lock);
149 if (ch->next > CHEAP_ROOT) {
150 child = ch->next - 1;
151 spin_lock(&ch->heap[child].lock);
152 /* see if callback wants this item
153 */
154 if (!pred || pred(pred_ctx, ch->heap[child].value))
155 /* yes, proceed */
156 ch->next--;
157 else {
158 /* no, cleanup and return */
159 spin_unlock(&ch->heap[child].lock);
160 child = ch->size;
161 }
162 } else
163 child = ch->size;
164 spin_unlock(&ch->lock);
165
166 if (child == ch->size)
167 /* empty heap */
168 return NULL;
169
170 /* take value from last leaf */
171 cval = ch->heap[child].value;
172 ctag = ch->heap[child].tag;
173 /* free last leaf */
174 ch->heap[child].tag = CHEAP_EMPTY;
175 ch->heap[child].value = NULL;
176
177 /* unlock before locking root to maintain locking order */
178 spin_unlock(&ch->heap[child].lock);
179
180 spin_lock(&ch->heap[CHEAP_ROOT].lock);
181 if (ch->heap[CHEAP_ROOT].tag == CHEAP_EMPTY) {
182 /* heap became empty, we got the last one */
183 spin_unlock(&ch->heap[CHEAP_ROOT].lock);
184 return cval;
185 } else {
186 /* grab value of root (=min), replace with
187 * what we got from the last leaf
188 */
189 val = ch->heap[CHEAP_ROOT].value;
190 ch->heap[CHEAP_ROOT].value = cval;
191 ch->heap[CHEAP_ROOT].tag = CHEAP_READY;
192 }
193
194 /* Bubble down. We are still holding the ROOT (=parent) lock. */
195 child = 0;
196 parent = CHEAP_ROOT;
197 while (parent < __cheap_parent(ch->size)) {
198 left = __cheap_left_child(parent);
199 right = __cheap_right_child(parent);
200 spin_lock(&ch->heap[left].lock);
201 if (ch->heap[left].tag == CHEAP_EMPTY) {
202 /* end of the heap, done */
203 spin_unlock(&ch->heap[left].lock);
204 break;
205 } else if (right < ch->size) {
206 /* right child node exists */
207 spin_lock(&ch->heap[right].lock);
208 if (ch->heap[right].tag == CHEAP_EMPTY ||
209 higher_prio(ch->heap[left].value,
210 ch->heap[right].value)) {
211 /* left child node has higher priority */
212 spin_unlock(&ch->heap[right].lock);
213 child = left;
214 } else {
215 /* right child node has higher priority */
216 spin_unlock(&ch->heap[left].lock);
217 child = right;
218 }
219 } else {
220 /* right child node does not exist */
221 child = left;
222 }
223 if (higher_prio(ch->heap[child].value,
224 ch->heap[parent].value)) {
225 /* parent and child out of order */
226 __cheap_swap(ch->heap + child,
227 ch->heap + parent);
228 spin_unlock(&ch->heap[parent].lock);
229 /* move down */
230 parent = child;
231 } else {
232 /* in order; we are done. */
233 spin_unlock(&ch->heap[child].lock);
234 break;
235 }
236 }
237 spin_unlock(&ch->heap[parent].lock);
238 return val;
239}