diff options
Diffstat (limited to 'litmus/cheap.c')
-rw-r--r-- | litmus/cheap.c | 249 |
1 file changed, 0 insertions, 249 deletions
diff --git a/litmus/cheap.c b/litmus/cheap.c deleted file mode 100644 index fde5304f8c..0000000000 --- a/litmus/cheap.c +++ /dev/null | |||
@@ -1,249 +0,0 @@ | |||
1 | #include "litmus/cheap.h" | ||
2 | |||
/* Index of the parent of node @child in the implicit binary heap layout. */
static unsigned int __cheap_parent(unsigned int child)
{
	return (child - 1) >> 1;
}
7 | |||
/* Index of the left child of node @parent in the implicit binary heap. */
static unsigned int __cheap_left_child(unsigned int parent)
{
	return 2u * parent + 1u;
}
12 | |||
/* Index of the right child of node @parent in the implicit binary heap. */
static unsigned int __cheap_right_child(unsigned int parent)
{
	return (parent + 1u) * 2u;
}
17 | |||
18 | static void __cheap_swap(struct cheap_node* a, struct cheap_node* b) | ||
19 | { | ||
20 | unsigned int tag; | ||
21 | void* val; | ||
22 | tag = a->tag; | ||
23 | val = a->value; | ||
24 | a->tag = b->tag; | ||
25 | a->value = b->value; | ||
26 | b->tag = tag; | ||
27 | b->value = val; | ||
28 | } | ||
29 | |||
30 | void cheap_init(struct cheap* ch, unsigned int size, | ||
31 | struct cheap_node* nodes) | ||
32 | { | ||
33 | unsigned int i; | ||
34 | spin_lock_init(&ch->lock); | ||
35 | ch->next = 0; | ||
36 | ch->size = size; | ||
37 | ch->heap = nodes; | ||
38 | |||
39 | for (i = 0; i < size; i++) { | ||
40 | spin_lock_init(&ch->heap[i].lock); | ||
41 | ch->heap[i].tag = CHEAP_EMPTY; | ||
42 | ch->heap[i].value = NULL; | ||
43 | } | ||
44 | } | ||
45 | |||
46 | void* cheap_peek(struct cheap* ch) | ||
47 | { | ||
48 | void* val; | ||
49 | spin_lock(&ch->heap[CHEAP_ROOT].lock); | ||
50 | val = ch->heap[CHEAP_ROOT].tag != CHEAP_EMPTY ? | ||
51 | ch->heap[CHEAP_ROOT].value : NULL; | ||
52 | spin_unlock(&ch->heap[CHEAP_ROOT].lock); | ||
53 | return val; | ||
54 | } | ||
55 | |||
56 | int cheap_insert(cheap_prio_t higher_prio, | ||
57 | struct cheap* ch, | ||
58 | void* item, | ||
59 | int pid) | ||
60 | { | ||
61 | int stop = 0; | ||
62 | unsigned int child, parent, locked; | ||
63 | unsigned int wait_for_parent_state; | ||
64 | |||
65 | lockdep_off(); /* generates false positives */ | ||
66 | |||
67 | spin_lock(&ch->lock); | ||
68 | if (ch->next < ch->size) { | ||
69 | /* ok, node allocated */ | ||
70 | child = ch->next++; | ||
71 | spin_lock(&ch->heap[child].lock); | ||
72 | ch->heap[child].tag = pid; | ||
73 | ch->heap[child].value = item; | ||
74 | spin_unlock(&ch->lock); | ||
75 | } else { | ||
76 | /* out of space! */ | ||
77 | spin_unlock(&ch->lock); | ||
78 | lockdep_on(); | ||
79 | return -1; | ||
80 | } | ||
81 | |||
82 | spin_unlock(&ch->heap[child].lock); | ||
83 | |||
84 | /* bubble up */ | ||
85 | while (!stop && child > CHEAP_ROOT) { | ||
86 | parent = __cheap_parent(child); | ||
87 | spin_lock(&ch->heap[parent].lock); | ||
88 | spin_lock(&ch->heap[child].lock); | ||
89 | locked = child; | ||
90 | wait_for_parent_state = CHEAP_EMPTY; | ||
91 | if (ch->heap[parent].tag == CHEAP_READY && | ||
92 | ch->heap[child].tag == pid) { | ||
93 | /* no interference */ | ||
94 | if (higher_prio(ch->heap[child].value, | ||
95 | ch->heap[parent].value)) { | ||
96 | /* out of order; swap and move up */ | ||
97 | __cheap_swap(ch->heap + child, | ||
98 | ch->heap + parent); | ||
99 | child = parent; | ||
100 | } else { | ||
101 | /* In order; we are done. */ | ||
102 | ch->heap[child].tag = CHEAP_READY; | ||
103 | stop = 1; | ||
104 | } | ||
105 | } else if (ch->heap[parent].tag == CHEAP_EMPTY) { | ||
106 | /* Concurrent extract moved child to root; | ||
107 | * we are done. | ||
108 | */ | ||
109 | stop = 1; | ||
110 | } else if (ch->heap[child].tag != pid) { | ||
111 | /* Concurrent extract moved child up; | ||
112 | * we go after it. | ||
113 | */ | ||
114 | child = parent; | ||
115 | } else { | ||
116 | /* Some other process needs to act first. | ||
117 | * We need to back off a little in order | ||
118 | * to give the others a chance to acquire the | ||
119 | * parent's lock. | ||
120 | */ | ||
121 | wait_for_parent_state = ch->heap[parent].tag; | ||
122 | } | ||
123 | |||
124 | spin_unlock(&ch->heap[locked].lock); | ||
125 | spin_unlock(&ch->heap[parent].lock); | ||
126 | |||
127 | while (wait_for_parent_state != CHEAP_EMPTY && | ||
128 | ((volatile unsigned int) ch->heap[parent].tag) == | ||
129 | wait_for_parent_state) | ||
130 | cpu_relax(); | ||
131 | |||
132 | } | ||
133 | if (!stop && child == CHEAP_ROOT) { | ||
134 | spin_lock(&ch->heap[child].lock); | ||
135 | if (ch->heap[child].tag == pid) | ||
136 | ch->heap[child].tag = CHEAP_READY; | ||
137 | spin_unlock(&ch->heap[child].lock); | ||
138 | } | ||
139 | |||
140 | lockdep_on(); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | void* cheap_take_if(cheap_take_predicate_t pred, | ||
145 | void* pred_ctx, | ||
146 | cheap_prio_t higher_prio, | ||
147 | struct cheap* ch) | ||
148 | { | ||
149 | void *val, *cval; | ||
150 | unsigned int ctag; | ||
151 | unsigned int left, right, child, parent; | ||
152 | |||
153 | lockdep_off(); | ||
154 | spin_lock(&ch->lock); | ||
155 | if (ch->next > CHEAP_ROOT) { | ||
156 | child = ch->next - 1; | ||
157 | spin_lock(&ch->heap[child].lock); | ||
158 | /* see if callback wants this item | ||
159 | */ | ||
160 | if (!pred || pred(pred_ctx, ch->heap[child].value)) | ||
161 | /* yes, proceed */ | ||
162 | ch->next--; | ||
163 | else { | ||
164 | /* no, cleanup and return */ | ||
165 | spin_unlock(&ch->heap[child].lock); | ||
166 | child = ch->size; | ||
167 | } | ||
168 | } else | ||
169 | child = ch->size; | ||
170 | spin_unlock(&ch->lock); | ||
171 | |||
172 | if (child == ch->size) { | ||
173 | lockdep_on(); | ||
174 | /* empty heap */ | ||
175 | return NULL; | ||
176 | } | ||
177 | |||
178 | /* take value from last leaf */ | ||
179 | cval = ch->heap[child].value; | ||
180 | ctag = ch->heap[child].tag; | ||
181 | /* free last leaf */ | ||
182 | ch->heap[child].tag = CHEAP_EMPTY; | ||
183 | ch->heap[child].value = NULL; | ||
184 | |||
185 | /* unlock before locking root to maintain locking order */ | ||
186 | spin_unlock(&ch->heap[child].lock); | ||
187 | |||
188 | spin_lock(&ch->heap[CHEAP_ROOT].lock); | ||
189 | if (ch->heap[CHEAP_ROOT].tag == CHEAP_EMPTY) { | ||
190 | /* heap became empty, we got the last one */ | ||
191 | spin_unlock(&ch->heap[CHEAP_ROOT].lock); | ||
192 | lockdep_on(); | ||
193 | return cval; | ||
194 | } else { | ||
195 | /* grab value of root (=min), replace with | ||
196 | * what we got from the last leaf | ||
197 | */ | ||
198 | val = ch->heap[CHEAP_ROOT].value; | ||
199 | ch->heap[CHEAP_ROOT].value = cval; | ||
200 | ch->heap[CHEAP_ROOT].tag = CHEAP_READY; | ||
201 | } | ||
202 | |||
203 | /* Bubble down. We are still holding the ROOT (=parent) lock. */ | ||
204 | child = 0; | ||
205 | parent = CHEAP_ROOT; | ||
206 | while (parent < __cheap_parent(ch->size)) { | ||
207 | left = __cheap_left_child(parent); | ||
208 | right = __cheap_right_child(parent); | ||
209 | spin_lock(&ch->heap[left].lock); | ||
210 | if (ch->heap[left].tag == CHEAP_EMPTY) { | ||
211 | /* end of the heap, done */ | ||
212 | spin_unlock(&ch->heap[left].lock); | ||
213 | break; | ||
214 | } else if (right < ch->size) { | ||
215 | /* right child node exists */ | ||
216 | spin_lock(&ch->heap[right].lock); | ||
217 | if (ch->heap[right].tag == CHEAP_EMPTY || | ||
218 | higher_prio(ch->heap[left].value, | ||
219 | ch->heap[right].value)) { | ||
220 | /* left child node has higher priority */ | ||
221 | spin_unlock(&ch->heap[right].lock); | ||
222 | child = left; | ||
223 | } else { | ||
224 | /* right child node has higher priority */ | ||
225 | spin_unlock(&ch->heap[left].lock); | ||
226 | child = right; | ||
227 | } | ||
228 | } else { | ||
229 | /* right child node does not exist */ | ||
230 | child = left; | ||
231 | } | ||
232 | if (higher_prio(ch->heap[child].value, | ||
233 | ch->heap[parent].value)) { | ||
234 | /* parent and child out of order */ | ||
235 | __cheap_swap(ch->heap + child, | ||
236 | ch->heap + parent); | ||
237 | spin_unlock(&ch->heap[parent].lock); | ||
238 | /* move down */ | ||
239 | parent = child; | ||
240 | } else { | ||
241 | /* in order; we are done. */ | ||
242 | spin_unlock(&ch->heap[child].lock); | ||
243 | break; | ||
244 | } | ||
245 | } | ||
246 | spin_unlock(&ch->heap[parent].lock); | ||
247 | lockdep_on(); | ||
248 | return val; | ||
249 | } | ||