author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-09-10 10:06:22 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-09-10 10:06:22 -0400
commit	c12cf2fe511e6ed9a0728c9fc43c1e5f3752a6ca (patch)
tree	68397b8cd45a9b796b5cc0a5237731522334c82f
parent	1e693ff3453ec9e1c9b5cda9972215ff6f48a151 (diff)
remove concurrent heap from mainline
-rw-r--r--  include/litmus/cheap.h   62
-rw-r--r--  litmus/Makefile           2
-rw-r--r--  litmus/cheap.c          249
3 files changed, 1 insertion, 312 deletions
diff --git a/include/litmus/cheap.h b/include/litmus/cheap.h
deleted file mode 100644
index 39c29fb0ab..0000000000
--- a/include/litmus/cheap.h
+++ /dev/null
@@ -1,62 +0,0 @@
/* cheap.h -- A concurrent heap implementation.
 *
 * (c) 2009 Bjoern B. Brandenburg, <bbb@cs.unc.edu>
 *
 * Based on:
 *
 * G. Hunt, M. Michael, S. Parthasarathy, and M. Scott,
 * "An efficient algorithm for concurrent priority queue heaps."
 * Information Processing Letters, 60(3):151-157, November 1996.
 */

#ifndef CHEAP_H
#define CHEAP_H

#include <linux/spinlock.h>

#define CHEAP_EMPTY 0xffffffff
#define CHEAP_READY 0xffffff00
#define CHEAP_ROOT  0

struct cheap_node {
	spinlock_t lock;

	unsigned int tag;
	void* value;
};

struct cheap {
	spinlock_t lock;

	unsigned int next;
	unsigned int size;

	struct cheap_node* heap;
};

typedef int (*cheap_prio_t)(void* a, void* b);

void cheap_init(struct cheap* ch,
		unsigned int size,
		struct cheap_node* nodes);

int cheap_insert(cheap_prio_t higher_prio,
		 struct cheap* ch,
		 void* item,
		 int pid);

void* cheap_peek(struct cheap* ch);

typedef int (*cheap_take_predicate_t)(void* ctx, void* value);

void* cheap_take_if(cheap_take_predicate_t pred,
		    void* pred_ctx,
		    cheap_prio_t higher_prio,
		    struct cheap* ch);

static inline void* cheap_take(cheap_prio_t higher_prio,
			       struct cheap* ch)
{
	return cheap_take_if(NULL, NULL, higher_prio, ch);
}

#endif /* CHEAP_H */
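
For context, this is roughly how the interface being removed was used by its callers. The following is a minimal sketch, not part of the patch; the names demo_heap, demo_nodes, demo_higher_prio, and demo_insert_take are invented for illustration, and it assumes a kernel tree in which litmus/cheap.h is still present:

#include <linux/sched.h>	/* for current->pid */
#include <litmus/cheap.h>

#define DEMO_SIZE 16

static struct cheap demo_heap;
static struct cheap_node demo_nodes[DEMO_SIZE];

/* Hypothetical priority order: a lower numeric value is higher priority. */
static int demo_higher_prio(void* a, void* b)
{
	return (unsigned long) a < (unsigned long) b;
}

static void demo_insert_take(void* item)
{
	void* top;

	/* the caller provides the backing storage for all nodes */
	cheap_init(&demo_heap, DEMO_SIZE, demo_nodes);

	/* cheap_insert() tags the new node with the caller's PID while the
	 * insertion is in flight and returns -1 if all slots are in use */
	if (cheap_insert(demo_higher_prio, &demo_heap, item, current->pid))
		return;

	top = cheap_peek(&demo_heap);	/* snapshot; may be stale at once */
	top = cheap_take(demo_higher_prio, &demo_heap);	/* removes the top */
	(void) top;
}
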
diff --git a/litmus/Makefile b/litmus/Makefile
index 59fd6f7328..837f697004 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -6,7 +6,7 @@ obj-y = sched_plugin.o litmus.o \
 	edf_common.o jobs.o \
 	rt_domain.o fdso.o sync.o \
 	fmlp.o srp.o norqlock.o \
-	cheap.o heap.o \
+	heap.o \
 	sched_gsn_edf.o \
 	sched_psn_edf.o \
 	sched_cedf.o \
diff --git a/litmus/cheap.c b/litmus/cheap.c
deleted file mode 100644
index fde5304f8c..0000000000
--- a/litmus/cheap.c
+++ /dev/null
@@ -1,249 +0,0 @@
#include <litmus/cheap.h>

static unsigned int __cheap_parent(unsigned int child)
{
	return (child - 1) / 2;
}

static unsigned int __cheap_left_child(unsigned int parent)
{
	return parent * 2 + 1;
}

static unsigned int __cheap_right_child(unsigned int parent)
{
	return parent * 2 + 2;
}
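
/* The heap is embedded in the nodes[] array with standard 0-based index
 * arithmetic: node 3, for example, has parent (3 - 1) / 2 = 1 and children
 * 3 * 2 + 1 = 7 and 3 * 2 + 2 = 8. */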

static void __cheap_swap(struct cheap_node* a, struct cheap_node* b)
{
	unsigned int tag;
	void* val;
	tag = a->tag;
	val = a->value;
	a->tag = b->tag;
	a->value = b->value;
	b->tag = tag;
	b->value = val;
}

void cheap_init(struct cheap* ch, unsigned int size,
		struct cheap_node* nodes)
{
	unsigned int i;
	spin_lock_init(&ch->lock);
	ch->next = 0;
	ch->size = size;
	ch->heap = nodes;

	for (i = 0; i < size; i++) {
		spin_lock_init(&ch->heap[i].lock);
		ch->heap[i].tag = CHEAP_EMPTY;
		ch->heap[i].value = NULL;
	}
}

void* cheap_peek(struct cheap* ch)
{
	void* val;
	spin_lock(&ch->heap[CHEAP_ROOT].lock);
	val = ch->heap[CHEAP_ROOT].tag != CHEAP_EMPTY ?
		ch->heap[CHEAP_ROOT].value : NULL;
	spin_unlock(&ch->heap[CHEAP_ROOT].lock);
	return val;
}

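/* cheap_peek() above returns only a snapshot: the root may change as soon
 * as its lock is dropped. For the insertion below, the tags implement the
 * protocol of Hunt et al.: CHEAP_EMPTY marks a free node, CHEAP_READY a
 * node whose value has reached its heap-ordered position, and a PID tag a
 * node still being bubbled up by the inserting process. At most one
 * parent/child lock pair is held at a time, so insertions and extractions
 * can proceed concurrently on disjoint parts of the heap. */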
int cheap_insert(cheap_prio_t higher_prio,
		 struct cheap* ch,
		 void* item,
		 int pid)
{
	int stop = 0;
	unsigned int child, parent, locked;
	unsigned int wait_for_parent_state;

	lockdep_off(); /* generates false positives */

	spin_lock(&ch->lock);
	if (ch->next < ch->size) {
		/* ok, node allocated */
		child = ch->next++;
		spin_lock(&ch->heap[child].lock);
		ch->heap[child].tag = pid;
		ch->heap[child].value = item;
		spin_unlock(&ch->lock);
	} else {
		/* out of space! */
		spin_unlock(&ch->lock);
		lockdep_on();
		return -1;
	}

	spin_unlock(&ch->heap[child].lock);

	/* bubble up */
	while (!stop && child > CHEAP_ROOT) {
		parent = __cheap_parent(child);
		spin_lock(&ch->heap[parent].lock);
		spin_lock(&ch->heap[child].lock);
		locked = child;
		wait_for_parent_state = CHEAP_EMPTY;
		if (ch->heap[parent].tag == CHEAP_READY &&
		    ch->heap[child].tag == pid) {
			/* no interference */
			if (higher_prio(ch->heap[child].value,
					ch->heap[parent].value)) {
				/* out of order; swap and move up */
				__cheap_swap(ch->heap + child,
					     ch->heap + parent);
				child = parent;
			} else {
				/* in order; we are done */
				ch->heap[child].tag = CHEAP_READY;
				stop = 1;
			}
		} else if (ch->heap[parent].tag == CHEAP_EMPTY) {
			/* Concurrent extract moved child to root;
			 * we are done.
			 */
			stop = 1;
		} else if (ch->heap[child].tag != pid) {
			/* Concurrent extract moved child up;
			 * we go after it.
			 */
			child = parent;
		} else {
			/* Some other process needs to act first.
			 * We need to back off a little in order
			 * to give the others a chance to acquire the
			 * parent's lock.
			 */
			wait_for_parent_state = ch->heap[parent].tag;
		}

		spin_unlock(&ch->heap[locked].lock);
		spin_unlock(&ch->heap[parent].lock);

		/* ACCESS_ONCE() forces a fresh load of the tag on every
		 * iteration; a plain (volatile unsigned int) cast of the
		 * loaded value does not. */
		while (wait_for_parent_state != CHEAP_EMPTY &&
		       ACCESS_ONCE(ch->heap[parent].tag) ==
		       wait_for_parent_state)
			cpu_relax();

	}
	if (!stop && child == CHEAP_ROOT) {
		spin_lock(&ch->heap[child].lock);
		if (ch->heap[child].tag == pid)
			ch->heap[child].tag = CHEAP_READY;
		spin_unlock(&ch->heap[child].lock);
	}

	lockdep_on();
	return 0;
}

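/* Extraction: detach the last leaf under the global lock, then either
 * return its value directly (the heap became empty) or write it over the
 * root and restore the heap property by bubbling down, again holding at
 * most one parent/child lock pair at a time. When a predicate is given,
 * it is shown the value of the last leaf and may veto the take. */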
void* cheap_take_if(cheap_take_predicate_t pred,
		    void* pred_ctx,
		    cheap_prio_t higher_prio,
		    struct cheap* ch)
{
	void *val, *cval;
	unsigned int ctag;
	unsigned int left, right, child, parent;

	lockdep_off();
	spin_lock(&ch->lock);
	if (ch->next > CHEAP_ROOT) {
		child = ch->next - 1;
		spin_lock(&ch->heap[child].lock);
		/* see if callback wants this item */
		if (!pred || pred(pred_ctx, ch->heap[child].value))
			/* yes, proceed */
			ch->next--;
		else {
			/* no, cleanup and return */
			spin_unlock(&ch->heap[child].lock);
			child = ch->size;
		}
	} else
		child = ch->size;
	spin_unlock(&ch->lock);

	if (child == ch->size) {
		lockdep_on();
		/* empty heap */
		return NULL;
	}

	/* take value from last leaf */
	cval = ch->heap[child].value;
	ctag = ch->heap[child].tag;
	/* free last leaf */
	ch->heap[child].tag = CHEAP_EMPTY;
	ch->heap[child].value = NULL;

	/* unlock before locking root to maintain locking order */
	spin_unlock(&ch->heap[child].lock);

	spin_lock(&ch->heap[CHEAP_ROOT].lock);
	if (ch->heap[CHEAP_ROOT].tag == CHEAP_EMPTY) {
		/* heap became empty, we got the last one */
		spin_unlock(&ch->heap[CHEAP_ROOT].lock);
		lockdep_on();
		return cval;
	} else {
		/* grab value of root (= highest priority), replace with
		 * what we got from the last leaf
		 */
		val = ch->heap[CHEAP_ROOT].value;
		ch->heap[CHEAP_ROOT].value = cval;
		ch->heap[CHEAP_ROOT].tag = CHEAP_READY;
	}

	/* Bubble down. We are still holding the ROOT (=parent) lock. */
	child = 0;
	parent = CHEAP_ROOT;
	while (parent < __cheap_parent(ch->size)) {
		left = __cheap_left_child(parent);
		right = __cheap_right_child(parent);
		spin_lock(&ch->heap[left].lock);
		if (ch->heap[left].tag == CHEAP_EMPTY) {
			/* end of the heap, done */
			spin_unlock(&ch->heap[left].lock);
			break;
		} else if (right < ch->size) {
			/* right child node exists */
			spin_lock(&ch->heap[right].lock);
			if (ch->heap[right].tag == CHEAP_EMPTY ||
			    higher_prio(ch->heap[left].value,
					ch->heap[right].value)) {
				/* left child node has higher priority */
				spin_unlock(&ch->heap[right].lock);
				child = left;
			} else {
				/* right child node has higher priority */
				spin_unlock(&ch->heap[left].lock);
				child = right;
			}
		} else {
			/* right child node does not exist */
			child = left;
		}
		if (higher_prio(ch->heap[child].value,
				ch->heap[parent].value)) {
			/* parent and child out of order */
			__cheap_swap(ch->heap + child,
				     ch->heap + parent);
			spin_unlock(&ch->heap[parent].lock);
			/* move down */
			parent = child;
		} else {
			/* in order; we are done. */
			spin_unlock(&ch->heap[child].lock);
			break;
		}
	}
	spin_unlock(&ch->heap[parent].lock);
	lockdep_on();
	return val;
}
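
Finally, a hedged sketch of how cheap_take_if() might be called, again assuming the pre-removal tree. Note that, as implemented above, the predicate inspects the value of the last leaf (the node whose value would replace the root), not the value that cheap_take_if() returns. The names take_ctx, earlier_deadline, beats_bound, and conditional_take are invented for illustration:

#include <litmus/cheap.h>

/* Hypothetical: items point to an unsigned long long deadline field,
 * and an earlier deadline means higher priority. */
static int earlier_deadline(void* a, void* b)
{
	return *(unsigned long long*) a < *(unsigned long long*) b;
}

struct take_ctx {
	unsigned long long bound;
};

/* Veto the extraction unless the inspected value beats the bound. */
static int beats_bound(void* ctx, void* value)
{
	struct take_ctx* tc = ctx;
	return *(unsigned long long*) value < tc->bound;
}

/* Returns NULL if the heap is empty or the predicate declines the take. */
static void* conditional_take(struct cheap* ch, unsigned long long bound)
{
	struct take_ctx tc = { .bound = bound };
	return cheap_take_if(beats_bound, &tc, earlier_deadline, ch);
}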