author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-04-19 17:44:05 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-04-19 17:44:05 -0400
commit		efcf5b3b66ad9df13e9e53dea3a443a254f6e62b (patch)
tree		f92c6b821eca46e6c06fb80a8bcf6b82dfdcc9df
parent		3f32c4d828e5e5f67e2483fada07be7cbad1032b (diff)
concurrent heap: disable lockdep
Lockdep doesn't like the heap's locking pattern. We'd have to allocate a locking class for each cheap_node, which is a bit excessive memory-wise.
-rw-r--r--	litmus/cheap.c	12
1 files changed, 11 insertions, 1 deletions
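For context, the alternative that the commit message rejects would look roughly like the sketch below: registering a distinct lockdep class for every cheap_node so that hand-over-hand parent/child locking is not flagged as recursive locking. This is not part of the patch; the struct name, the init helper, and any field beyond lock/tag/value are assumptions for illustration only.

/* Hypothetical sketch -- NOT part of this patch. Per-node lock classes
 * would silence lockdep's false positives for the hand-over-hand locking,
 * but cost one struct lock_class_key per heap node. */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct cheap_node_classed {
	spinlock_t		lock;
	struct lock_class_key	key;	/* extra per-node memory */
	unsigned int		tag;
	void*			value;
};

static void cheap_node_classed_init(struct cheap_node_classed* n)
{
	spin_lock_init(&n->lock);
	/* distinct class per node: holding a parent's lock while taking a
	 * child's is then not reported as deadlock-prone by lockdep */
	lockdep_set_class(&n->lock, &n->key);
}

The patch instead brackets the heap operations with lockdep_off()/lockdep_on(), trading lockdep coverage inside cheap.c for zero per-node memory overhead.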
diff --git a/litmus/cheap.c b/litmus/cheap.c
index 1036828192..9fc68fd053 100644
--- a/litmus/cheap.c
+++ b/litmus/cheap.c
@@ -61,6 +61,8 @@ int cheap_insert(cheap_prio_t higher_prio,
 	int stop = 0;
 	unsigned int child, parent, locked;
 	unsigned int wait_for_parent_state;
+
+	lockdep_off(); /* generates false positives */
 
 	spin_lock(&ch->lock);
 	if (ch->next < ch->size) {
@@ -73,6 +75,7 @@ int cheap_insert(cheap_prio_t higher_prio,
 	} else {
 		/* out of space! */
 		spin_unlock(&ch->lock);
+		lockdep_on();
 		return -1;
 	}
 
@@ -133,6 +136,8 @@ int cheap_insert(cheap_prio_t higher_prio,
 		ch->heap[child].tag = CHEAP_READY;
 		spin_unlock(&ch->heap[child].lock);
 	}
+
+	lockdep_on();
 	return 0;
 }
 
@@ -145,6 +150,7 @@ void* cheap_take_if(cheap_take_predicate_t pred,
 	unsigned int ctag;
 	unsigned int left, right, child, parent;
 
+	lockdep_off();
 	spin_lock(&ch->lock);
 	if (ch->next > CHEAP_ROOT) {
 		child = ch->next - 1;
@@ -163,9 +169,11 @@ void* cheap_take_if(cheap_take_predicate_t pred,
 		child = ch->size;
 	spin_unlock(&ch->lock);
 
-	if (child == ch->size)
+	if (child == ch->size) {
+		lockdep_on();
 		/* empty heap */
 		return NULL;
+	}
 
 	/* take value from last leaf */
 	cval = ch->heap[child].value;
@@ -181,6 +189,7 @@ void* cheap_take_if(cheap_take_predicate_t pred,
 	if (ch->heap[CHEAP_ROOT].tag == CHEAP_EMPTY) {
 		/* heap became empty, we got the last one */
 		spin_unlock(&ch->heap[CHEAP_ROOT].lock);
+		lockdep_on();
 		return cval;
 	} else {
 		/* grab value of root (=min), replace with
@@ -235,5 +244,6 @@ void* cheap_take_if(cheap_take_predicate_t pred,
 		}
 	}
 	spin_unlock(&ch->heap[parent].lock);
+	lockdep_on();
 	return val;
 }