diff options
-rw-r--r-- | drivers/md/bcache/btree.c | 6 | ||||
-rw-r--r-- | drivers/md/bcache/closure.c | 103 | ||||
-rw-r--r-- | drivers/md/bcache/closure.h | 181 |
3 files changed, 40 insertions, 250 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index cb1a490f7f86..3e0c90130c2e 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -407,7 +407,7 @@ static void do_btree_node_write(struct btree *b) | |||
407 | b->bio = bch_bbio_alloc(b->c); | 407 | b->bio = bch_bbio_alloc(b->c); |
408 | 408 | ||
409 | b->bio->bi_end_io = btree_node_write_endio; | 409 | b->bio->bi_end_io = btree_node_write_endio; |
410 | b->bio->bi_private = &b->io.cl; | 410 | b->bio->bi_private = cl; |
411 | b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; | 411 | b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; |
412 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); | 412 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); |
413 | bch_bio_map(b->bio, i); | 413 | bch_bio_map(b->bio, i); |
@@ -672,8 +672,8 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush) | |||
672 | } | 672 | } |
673 | 673 | ||
674 | /* wait for any in flight btree write */ | 674 | /* wait for any in flight btree write */ |
675 | closure_wait_event_sync(&b->io.wait, &cl, | 675 | closure_wait_event(&b->io.wait, &cl, |
676 | atomic_read(&b->io.cl.remaining) == -1); | 676 | atomic_read(&b->io.cl.remaining) == -1); |
677 | 677 | ||
678 | return 0; | 678 | return 0; |
679 | } | 679 | } |
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 9aba2017f0d1..dfff2410322e 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c | |||
@@ -11,17 +11,6 @@ | |||
11 | 11 | ||
12 | #include "closure.h" | 12 | #include "closure.h" |
13 | 13 | ||
14 | void closure_queue(struct closure *cl) | ||
15 | { | ||
16 | struct workqueue_struct *wq = cl->wq; | ||
17 | if (wq) { | ||
18 | INIT_WORK(&cl->work, cl->work.func); | ||
19 | BUG_ON(!queue_work(wq, &cl->work)); | ||
20 | } else | ||
21 | cl->fn(cl); | ||
22 | } | ||
23 | EXPORT_SYMBOL_GPL(closure_queue); | ||
24 | |||
25 | #define CL_FIELD(type, field) \ | 14 | #define CL_FIELD(type, field) \ |
26 | case TYPE_ ## type: \ | 15 | case TYPE_ ## type: \ |
27 | return &container_of(cl, struct type, cl)->field | 16 | return &container_of(cl, struct type, cl)->field |
@@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl) | |||
30 | { | 19 | { |
31 | switch (cl->type) { | 20 | switch (cl->type) { |
32 | CL_FIELD(closure_with_waitlist, wait); | 21 | CL_FIELD(closure_with_waitlist, wait); |
33 | CL_FIELD(closure_with_waitlist_and_timer, wait); | ||
34 | default: | ||
35 | return NULL; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | static struct timer_list *closure_timer(struct closure *cl) | ||
40 | { | ||
41 | switch (cl->type) { | ||
42 | CL_FIELD(closure_with_timer, timer); | ||
43 | CL_FIELD(closure_with_waitlist_and_timer, timer); | ||
44 | default: | 22 | default: |
45 | return NULL; | 23 | return NULL; |
46 | } | 24 | } |
@@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) | |||
51 | int r = flags & CLOSURE_REMAINING_MASK; | 29 | int r = flags & CLOSURE_REMAINING_MASK; |
52 | 30 | ||
53 | BUG_ON(flags & CLOSURE_GUARD_MASK); | 31 | BUG_ON(flags & CLOSURE_GUARD_MASK); |
54 | BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING))); | 32 | BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR)); |
55 | 33 | ||
56 | /* Must deliver precisely one wakeup */ | 34 | /* Must deliver precisely one wakeup */ |
57 | if (r == 1 && (flags & CLOSURE_SLEEPING)) | 35 | if (r == 1 && (flags & CLOSURE_SLEEPING)) |
@@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) | |||
59 | 37 | ||
60 | if (!r) { | 38 | if (!r) { |
61 | if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { | 39 | if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { |
62 | /* CLOSURE_BLOCKING might be set - clear it */ | ||
63 | atomic_set(&cl->remaining, | 40 | atomic_set(&cl->remaining, |
64 | CLOSURE_REMAINING_INITIALIZER); | 41 | CLOSURE_REMAINING_INITIALIZER); |
65 | closure_queue(cl); | 42 | closure_queue(cl); |
@@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v) | |||
90 | { | 67 | { |
91 | closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining)); | 68 | closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining)); |
92 | } | 69 | } |
93 | EXPORT_SYMBOL_GPL(closure_sub); | 70 | EXPORT_SYMBOL(closure_sub); |
94 | 71 | ||
95 | void closure_put(struct closure *cl) | 72 | void closure_put(struct closure *cl) |
96 | { | 73 | { |
97 | closure_put_after_sub(cl, atomic_dec_return(&cl->remaining)); | 74 | closure_put_after_sub(cl, atomic_dec_return(&cl->remaining)); |
98 | } | 75 | } |
99 | EXPORT_SYMBOL_GPL(closure_put); | 76 | EXPORT_SYMBOL(closure_put); |
100 | 77 | ||
101 | static void set_waiting(struct closure *cl, unsigned long f) | 78 | static void set_waiting(struct closure *cl, unsigned long f) |
102 | { | 79 | { |
@@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list) | |||
133 | closure_sub(cl, CLOSURE_WAITING + 1); | 110 | closure_sub(cl, CLOSURE_WAITING + 1); |
134 | } | 111 | } |
135 | } | 112 | } |
136 | EXPORT_SYMBOL_GPL(__closure_wake_up); | 113 | EXPORT_SYMBOL(__closure_wake_up); |
137 | 114 | ||
138 | bool closure_wait(struct closure_waitlist *list, struct closure *cl) | 115 | bool closure_wait(struct closure_waitlist *list, struct closure *cl) |
139 | { | 116 | { |
@@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl) | |||
146 | 123 | ||
147 | return true; | 124 | return true; |
148 | } | 125 | } |
149 | EXPORT_SYMBOL_GPL(closure_wait); | 126 | EXPORT_SYMBOL(closure_wait); |
150 | 127 | ||
151 | /** | 128 | /** |
152 | * closure_sync() - sleep until a closure has nothing left to wait on | 129 |
@@ -169,7 +146,7 @@ void closure_sync(struct closure *cl) | |||
169 | 146 | ||
170 | __closure_end_sleep(cl); | 147 | __closure_end_sleep(cl); |
171 | } | 148 | } |
172 | EXPORT_SYMBOL_GPL(closure_sync); | 149 | EXPORT_SYMBOL(closure_sync); |
173 | 150 | ||
174 | /** | 151 | /** |
175 | * closure_trylock() - try to acquire the closure, without waiting | 152 | * closure_trylock() - try to acquire the closure, without waiting |
@@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent) | |||
183 | CLOSURE_REMAINING_INITIALIZER) != -1) | 160 | CLOSURE_REMAINING_INITIALIZER) != -1) |
184 | return false; | 161 | return false; |
185 | 162 | ||
186 | closure_set_ret_ip(cl); | ||
187 | |||
188 | smp_mb(); | 163 | smp_mb(); |
164 | |||
189 | cl->parent = parent; | 165 | cl->parent = parent; |
190 | if (parent) | 166 | if (parent) |
191 | closure_get(parent); | 167 | closure_get(parent); |
192 | 168 | ||
169 | closure_set_ret_ip(cl); | ||
193 | closure_debug_create(cl); | 170 | closure_debug_create(cl); |
194 | return true; | 171 | return true; |
195 | } | 172 | } |
196 | EXPORT_SYMBOL_GPL(closure_trylock); | 173 | EXPORT_SYMBOL(closure_trylock); |
197 | 174 | ||
198 | void __closure_lock(struct closure *cl, struct closure *parent, | 175 | void __closure_lock(struct closure *cl, struct closure *parent, |
199 | struct closure_waitlist *wait_list) | 176 | struct closure_waitlist *wait_list) |
@@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent, | |||
205 | if (closure_trylock(cl, parent)) | 182 | if (closure_trylock(cl, parent)) |
206 | return; | 183 | return; |
207 | 184 | ||
208 | closure_wait_event_sync(wait_list, &wait, | 185 | closure_wait_event(wait_list, &wait, |
209 | atomic_read(&cl->remaining) == -1); | 186 | atomic_read(&cl->remaining) == -1); |
210 | } | 187 | } |
211 | } | 188 | } |
212 | EXPORT_SYMBOL_GPL(__closure_lock); | 189 | EXPORT_SYMBOL(__closure_lock); |
213 | |||
214 | static void closure_delay_timer_fn(unsigned long data) | ||
215 | { | ||
216 | struct closure *cl = (struct closure *) data; | ||
217 | closure_sub(cl, CLOSURE_TIMER + 1); | ||
218 | } | ||
219 | |||
220 | void do_closure_timer_init(struct closure *cl) | ||
221 | { | ||
222 | struct timer_list *timer = closure_timer(cl); | ||
223 | |||
224 | init_timer(timer); | ||
225 | timer->data = (unsigned long) cl; | ||
226 | timer->function = closure_delay_timer_fn; | ||
227 | } | ||
228 | EXPORT_SYMBOL_GPL(do_closure_timer_init); | ||
229 | |||
230 | bool __closure_delay(struct closure *cl, unsigned long delay, | ||
231 | struct timer_list *timer) | ||
232 | { | ||
233 | if (atomic_read(&cl->remaining) & CLOSURE_TIMER) | ||
234 | return false; | ||
235 | |||
236 | BUG_ON(timer_pending(timer)); | ||
237 | |||
238 | timer->expires = jiffies + delay; | ||
239 | |||
240 | atomic_add(CLOSURE_TIMER + 1, &cl->remaining); | ||
241 | add_timer(timer); | ||
242 | return true; | ||
243 | } | ||
244 | EXPORT_SYMBOL_GPL(__closure_delay); | ||
245 | |||
246 | void __closure_flush(struct closure *cl, struct timer_list *timer) | ||
247 | { | ||
248 | if (del_timer(timer)) | ||
249 | closure_sub(cl, CLOSURE_TIMER + 1); | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(__closure_flush); | ||
252 | |||
253 | void __closure_flush_sync(struct closure *cl, struct timer_list *timer) | ||
254 | { | ||
255 | if (del_timer_sync(timer)) | ||
256 | closure_sub(cl, CLOSURE_TIMER + 1); | ||
257 | } | ||
258 | EXPORT_SYMBOL_GPL(__closure_flush_sync); | ||
259 | 190 | ||
260 | #ifdef CONFIG_BCACHE_CLOSURES_DEBUG | 191 | #ifdef CONFIG_BCACHE_CLOSURES_DEBUG |
261 | 192 | ||
@@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl) | |||
273 | list_add(&cl->all, &closure_list); | 204 | list_add(&cl->all, &closure_list); |
274 | spin_unlock_irqrestore(&closure_list_lock, flags); | 205 | spin_unlock_irqrestore(&closure_list_lock, flags); |
275 | } | 206 | } |
276 | EXPORT_SYMBOL_GPL(closure_debug_create); | 207 | EXPORT_SYMBOL(closure_debug_create); |
277 | 208 | ||
278 | void closure_debug_destroy(struct closure *cl) | 209 | void closure_debug_destroy(struct closure *cl) |
279 | { | 210 | { |
@@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl) | |||
286 | list_del(&cl->all); | 217 | list_del(&cl->all); |
287 | spin_unlock_irqrestore(&closure_list_lock, flags); | 218 | spin_unlock_irqrestore(&closure_list_lock, flags); |
288 | } | 219 | } |
289 | EXPORT_SYMBOL_GPL(closure_debug_destroy); | 220 | EXPORT_SYMBOL(closure_debug_destroy); |
290 | 221 | ||
291 | static struct dentry *debug; | 222 | static struct dentry *debug; |
292 | 223 | ||
@@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data) | |||
304 | cl, (void *) cl->ip, cl->fn, cl->parent, | 235 | cl, (void *) cl->ip, cl->fn, cl->parent, |
305 | r & CLOSURE_REMAINING_MASK); | 236 | r & CLOSURE_REMAINING_MASK); |
306 | 237 | ||
307 | seq_printf(f, "%s%s%s%s%s%s\n", | 238 | seq_printf(f, "%s%s%s%s\n", |
308 | test_bit(WORK_STRUCT_PENDING, | 239 | test_bit(WORK_STRUCT_PENDING, |
309 | work_data_bits(&cl->work)) ? "Q" : "", | 240 | work_data_bits(&cl->work)) ? "Q" : "", |
310 | r & CLOSURE_RUNNING ? "R" : "", | 241 | r & CLOSURE_RUNNING ? "R" : "", |
311 | r & CLOSURE_BLOCKING ? "B" : "", | ||
312 | r & CLOSURE_STACK ? "S" : "", | 242 | r & CLOSURE_STACK ? "S" : "", |
313 | r & CLOSURE_SLEEPING ? "Sl" : "", | 243 | r & CLOSURE_SLEEPING ? "Sl" : ""); |
314 | r & CLOSURE_TIMER ? "T" : ""); | ||
315 | 244 | ||
316 | if (r & CLOSURE_WAITING) | 245 | if (r & CLOSURE_WAITING) |
317 | seq_printf(f, " W %pF\n", | 246 | seq_printf(f, " W %pF\n", |
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index ab011f03801f..9762f1be3304 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h | |||
@@ -155,21 +155,6 @@ | |||
155 | * delayed_work embeds a work item and a timer_list. The important thing is, use | 155 | * delayed_work embeds a work item and a timer_list. The important thing is, use |
156 | * it exactly like you would a regular closure and closure_put() will magically | 156 | * it exactly like you would a regular closure and closure_put() will magically |
157 | * handle everything for you. | 157 | * handle everything for you. |
158 | * | ||
159 | * We've got closures that embed timers, too. They're called, appropriately | ||
160 | * enough: | ||
161 | * struct closure_with_timer; | ||
162 | * | ||
163 | * This gives you access to closure_delay(). It takes a refcount for a specified | ||
164 | * number of jiffies - you could then call closure_sync() (for a slightly | ||
165 | * convoluted version of msleep()) or continue_at() - which gives you the same | ||
166 | * effect as using a delayed work item, except you can reuse the work_struct | ||
167 | * already embedded in struct closure. | ||
168 | * | ||
169 | * Lastly, there's struct closure_with_waitlist_and_timer. It does what you | ||
170 | * probably expect, if you happen to need the features of both. (You don't | ||
171 | * really want to know how all this is implemented, but if I've done my job | ||
172 | * right you shouldn't have to care). | ||
173 | */ | 158 | */ |
174 | 159 | ||
175 | struct closure; | 160 | struct closure; |
@@ -182,16 +167,11 @@ struct closure_waitlist { | |||
182 | enum closure_type { | 167 | enum closure_type { |
183 | TYPE_closure = 0, | 168 | TYPE_closure = 0, |
184 | TYPE_closure_with_waitlist = 1, | 169 | TYPE_closure_with_waitlist = 1, |
185 | TYPE_closure_with_timer = 2, | 170 | MAX_CLOSURE_TYPE = 1, |
186 | TYPE_closure_with_waitlist_and_timer = 3, | ||
187 | MAX_CLOSURE_TYPE = 3, | ||
188 | }; | 171 | }; |
189 | 172 | ||
190 | enum closure_state { | 173 | enum closure_state { |
191 | /* | 174 | /* |
192 | * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of | ||
193 | * waiting asynchronously | ||
194 | * | ||
195 | * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by | 175 | * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by |
196 | * the thread that owns the closure, and cleared by the thread that's | 176 | * the thread that owns the closure, and cleared by the thread that's |
197 | * waking up the closure. | 177 | * waking up the closure. |
@@ -200,10 +180,6 @@ enum closure_state { | |||
200 | * - indicates that cl->task is valid and closure_put() may wake it up. | 180 | * - indicates that cl->task is valid and closure_put() may wake it up. |
201 | * Only set or cleared by the thread that owns the closure. | 181 | * Only set or cleared by the thread that owns the closure. |
202 | * | 182 | * |
203 | * CLOSURE_TIMER: Analogous to CLOSURE_WAITING, indicates that a closure | ||
204 | * has an outstanding timer. Must be set by the thread that owns the | ||
205 | * closure, and cleared by the timer function when the timer goes off. | ||
206 | * | ||
207 | * The rest are for debugging and don't affect behaviour: | 183 | * The rest are for debugging and don't affect behaviour: |
208 | * | 184 | * |
209 | * CLOSURE_RUNNING: Set when a closure is running (i.e. by | 185 | * CLOSURE_RUNNING: Set when a closure is running (i.e. by |
@@ -218,19 +194,17 @@ enum closure_state { | |||
218 | * closure with this flag set | 194 | * closure with this flag set |
219 | */ | 195 | */ |
220 | 196 | ||
221 | CLOSURE_BITS_START = (1 << 19), | 197 | CLOSURE_BITS_START = (1 << 23), |
222 | CLOSURE_DESTRUCTOR = (1 << 19), | 198 | CLOSURE_DESTRUCTOR = (1 << 23), |
223 | CLOSURE_BLOCKING = (1 << 21), | 199 | CLOSURE_WAITING = (1 << 25), |
224 | CLOSURE_WAITING = (1 << 23), | 200 | CLOSURE_SLEEPING = (1 << 27), |
225 | CLOSURE_SLEEPING = (1 << 25), | ||
226 | CLOSURE_TIMER = (1 << 27), | ||
227 | CLOSURE_RUNNING = (1 << 29), | 201 | CLOSURE_RUNNING = (1 << 29), |
228 | CLOSURE_STACK = (1 << 31), | 202 | CLOSURE_STACK = (1 << 31), |
229 | }; | 203 | }; |
230 | 204 | ||
231 | #define CLOSURE_GUARD_MASK \ | 205 | #define CLOSURE_GUARD_MASK \ |
232 | ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \ | 206 | ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \ |
233 | CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1) | 207 | CLOSURE_RUNNING|CLOSURE_STACK) << 1) |
234 | 208 | ||
235 | #define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1) | 209 | #define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1) |
236 | #define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING) | 210 | #define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING) |
@@ -268,17 +242,6 @@ struct closure_with_waitlist { | |||
268 | struct closure_waitlist wait; | 242 | struct closure_waitlist wait; |
269 | }; | 243 | }; |
270 | 244 | ||
271 | struct closure_with_timer { | ||
272 | struct closure cl; | ||
273 | struct timer_list timer; | ||
274 | }; | ||
275 | |||
276 | struct closure_with_waitlist_and_timer { | ||
277 | struct closure cl; | ||
278 | struct closure_waitlist wait; | ||
279 | struct timer_list timer; | ||
280 | }; | ||
281 | |||
282 | extern unsigned invalid_closure_type(void); | 245 | extern unsigned invalid_closure_type(void); |
283 | 246 | ||
284 | #define __CLOSURE_TYPE(cl, _t) \ | 247 | #define __CLOSURE_TYPE(cl, _t) \ |
@@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void); | |||
289 | ( \ | 252 | ( \ |
290 | __CLOSURE_TYPE(cl, closure) \ | 253 | __CLOSURE_TYPE(cl, closure) \ |
291 | __CLOSURE_TYPE(cl, closure_with_waitlist) \ | 254 | __CLOSURE_TYPE(cl, closure_with_waitlist) \ |
292 | __CLOSURE_TYPE(cl, closure_with_timer) \ | ||
293 | __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \ | ||
294 | invalid_closure_type() \ | 255 | invalid_closure_type() \ |
295 | ) | 256 | ) |
296 | 257 | ||
297 | void closure_sub(struct closure *cl, int v); | 258 | void closure_sub(struct closure *cl, int v); |
298 | void closure_put(struct closure *cl); | 259 | void closure_put(struct closure *cl); |
299 | void closure_queue(struct closure *cl); | ||
300 | void __closure_wake_up(struct closure_waitlist *list); | 260 | void __closure_wake_up(struct closure_waitlist *list); |
301 | bool closure_wait(struct closure_waitlist *list, struct closure *cl); | 261 | bool closure_wait(struct closure_waitlist *list, struct closure *cl); |
302 | void closure_sync(struct closure *cl); | 262 | void closure_sync(struct closure *cl); |
@@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent); | |||
305 | void __closure_lock(struct closure *cl, struct closure *parent, | 265 | void __closure_lock(struct closure *cl, struct closure *parent, |
306 | struct closure_waitlist *wait_list); | 266 | struct closure_waitlist *wait_list); |
307 | 267 | ||
308 | void do_closure_timer_init(struct closure *cl); | ||
309 | bool __closure_delay(struct closure *cl, unsigned long delay, | ||
310 | struct timer_list *timer); | ||
311 | void __closure_flush(struct closure *cl, struct timer_list *timer); | ||
312 | void __closure_flush_sync(struct closure *cl, struct timer_list *timer); | ||
313 | |||
314 | #ifdef CONFIG_BCACHE_CLOSURES_DEBUG | 268 | #ifdef CONFIG_BCACHE_CLOSURES_DEBUG |
315 | 269 | ||
316 | void closure_debug_init(void); | 270 | void closure_debug_init(void); |
@@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl) | |||
354 | atomic_sub(CLOSURE_RUNNING, &cl->remaining); | 308 | atomic_sub(CLOSURE_RUNNING, &cl->remaining); |
355 | } | 309 | } |
356 | 310 | ||
357 | static inline bool closure_is_stopped(struct closure *cl) | ||
358 | { | ||
359 | return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING); | ||
360 | } | ||
361 | |||
362 | static inline bool closure_is_unlocked(struct closure *cl) | 311 | static inline bool closure_is_unlocked(struct closure *cl) |
363 | { | 312 | { |
364 | return atomic_read(&cl->remaining) == -1; | 313 | return atomic_read(&cl->remaining) == -1; |
@@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl) | |||
367 | static inline void do_closure_init(struct closure *cl, struct closure *parent, | 316 | static inline void do_closure_init(struct closure *cl, struct closure *parent, |
368 | bool running) | 317 | bool running) |
369 | { | 318 | { |
370 | switch (cl->type) { | ||
371 | case TYPE_closure_with_timer: | ||
372 | case TYPE_closure_with_waitlist_and_timer: | ||
373 | do_closure_timer_init(cl); | ||
374 | default: | ||
375 | break; | ||
376 | } | ||
377 | |||
378 | cl->parent = parent; | 319 | cl->parent = parent; |
379 | if (parent) | 320 | if (parent) |
380 | closure_get(parent); | 321 | closure_get(parent); |
@@ -429,8 +370,7 @@ do { \ | |||
429 | static inline void closure_init_stack(struct closure *cl) | 370 | static inline void closure_init_stack(struct closure *cl) |
430 | { | 371 | { |
431 | memset(cl, 0, sizeof(struct closure)); | 372 | memset(cl, 0, sizeof(struct closure)); |
432 | atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER| | 373 | atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK); |
433 | CLOSURE_BLOCKING|CLOSURE_STACK); | ||
434 | } | 374 | } |
435 | 375 | ||
436 | /** | 376 | /** |
@@ -461,24 +401,6 @@ do { \ | |||
461 | #define closure_lock(cl, parent) \ | 401 | #define closure_lock(cl, parent) \ |
462 | __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait) | 402 | __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait) |
463 | 403 | ||
464 | /** | ||
465 | * closure_delay() - delay some number of jiffies | ||
466 | * @cl: the closure that will sleep | ||
467 | * @delay: the delay in jiffies | ||
468 | * | ||
469 | * Takes a refcount on @cl which will be released after @delay jiffies; this may | ||
470 | * be used to have a function run after a delay with continue_at(), or | ||
471 | * closure_sync() may be used for a convoluted version of msleep(). | ||
472 | */ | ||
473 | #define closure_delay(cl, delay) \ | ||
474 | __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer) | ||
475 | |||
476 | #define closure_flush(cl) \ | ||
477 | __closure_flush(__to_internal_closure(cl), &(cl)->timer) | ||
478 | |||
479 | #define closure_flush_sync(cl) \ | ||
480 | __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer) | ||
481 | |||
482 | static inline void __closure_end_sleep(struct closure *cl) | 404 | static inline void __closure_end_sleep(struct closure *cl) |
483 | { | 405 | { |
484 | __set_current_state(TASK_RUNNING); | 406 | __set_current_state(TASK_RUNNING); |
@@ -498,40 +420,6 @@ static inline void __closure_start_sleep(struct closure *cl) | |||
498 | } | 420 | } |
499 | 421 | ||
500 | /** | 422 | /** |
501 | * closure_blocking() - returns true if the closure is in blocking mode. | ||
502 | * | ||
503 | * If a closure is in blocking mode, closure_wait_event() will sleep until the | ||
504 | * condition is true instead of waiting asynchronously. | ||
505 | */ | ||
506 | static inline bool closure_blocking(struct closure *cl) | ||
507 | { | ||
508 | return atomic_read(&cl->remaining) & CLOSURE_BLOCKING; | ||
509 | } | ||
510 | |||
511 | /** | ||
512 | * set_closure_blocking() - put a closure in blocking mode. | ||
513 | * | ||
514 | * If a closure is in blocking mode, closure_wait_event() will sleep until the | ||
515 | * condition is true instead of waiting asynchronously. | ||
516 | * | ||
517 | * Not thread safe - can only be called by the thread running the closure. | ||
518 | */ | ||
519 | static inline void set_closure_blocking(struct closure *cl) | ||
520 | { | ||
521 | if (!closure_blocking(cl)) | ||
522 | atomic_add(CLOSURE_BLOCKING, &cl->remaining); | ||
523 | } | ||
524 | |||
525 | /* | ||
526 | * Not thread safe - can only be called by the thread running the closure. | ||
527 | */ | ||
528 | static inline void clear_closure_blocking(struct closure *cl) | ||
529 | { | ||
530 | if (closure_blocking(cl)) | ||
531 | atomic_sub(CLOSURE_BLOCKING, &cl->remaining); | ||
532 | } | ||
533 | |||
534 | /** | ||
535 | * closure_wake_up() - wake up all closures on a wait list. | 423 | * closure_wake_up() - wake up all closures on a wait list. |
536 | */ | 424 | */ |
537 | static inline void closure_wake_up(struct closure_waitlist *list) | 425 | static inline void closure_wake_up(struct closure_waitlist *list) |
@@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list) | |||
561 | * refcount on our closure. If this was a stack allocated closure, that would be | 449 | * refcount on our closure. If this was a stack allocated closure, that would be |
562 | * bad. | 450 | * bad. |
563 | */ | 451 | */ |
564 | #define __closure_wait_event(list, cl, condition, _block) \ | 452 | #define closure_wait_event(list, cl, condition) \ |
565 | ({ \ | 453 | ({ \ |
566 | bool block = _block; \ | ||
567 | typeof(condition) ret; \ | 454 | typeof(condition) ret; \ |
568 | \ | 455 | \ |
569 | while (1) { \ | 456 | while (1) { \ |
570 | ret = (condition); \ | 457 | ret = (condition); \ |
571 | if (ret) { \ | 458 | if (ret) { \ |
572 | __closure_wake_up(list); \ | 459 | __closure_wake_up(list); \ |
573 | if (block) \ | 460 | closure_sync(cl); \ |
574 | closure_sync(cl); \ | ||
575 | \ | ||
576 | break; \ | 461 | break; \ |
577 | } \ | 462 | } \ |
578 | \ | 463 | \ |
579 | if (block) \ | 464 | __closure_start_sleep(cl); \ |
580 | __closure_start_sleep(cl); \ | ||
581 | \ | ||
582 | if (!closure_wait(list, cl)) { \ | ||
583 | if (!block) \ | ||
584 | break; \ | ||
585 | \ | 465 | \ |
466 | if (!closure_wait(list, cl)) \ | ||
586 | schedule(); \ | 467 | schedule(); \ |
587 | } \ | ||
588 | } \ | 468 | } \ |
589 | \ | 469 | \ |
590 | ret; \ | 470 | ret; \ |
591 | }) | 471 | }) |
592 | 472 | ||
593 | /** | 473 | static inline void closure_queue(struct closure *cl) |
594 | * closure_wait_event() - wait on a condition, synchronously or asynchronously. | 474 | { |
595 | * @list: the wait list to wait on | 475 | struct workqueue_struct *wq = cl->wq; |
596 | * @cl: the closure that is doing the waiting | 476 | if (wq) { |
597 | * @condition: a C expression for the event to wait for | 477 | INIT_WORK(&cl->work, cl->work.func); |
598 | * | 478 | BUG_ON(!queue_work(wq, &cl->work)); |
599 | * If the closure is in blocking mode, sleeps until the @condition evaluates to | 479 | } else |
600 | * true - exactly like wait_event(). | 480 | cl->fn(cl); |
601 | * | 481 | } |
602 | * If the closure is not in blocking mode, waits asynchronously; if the | ||
603 | * condition is currently false the @cl is put onto @list and returns. @list | ||
604 | * owns a refcount on @cl; closure_sync() or continue_at() may be used later to | ||
605 | * wait for another thread to wake up @list, which drops the refcount on @cl. | ||
606 | * | ||
607 | * Returns the value of @condition; @cl will be on @list iff @condition was | ||
608 | * false. | ||
609 | * | ||
610 | * closure_wake_up(@list) must be called after changing any variable that could | ||
611 | * cause @condition to become true. | ||
612 | */ | ||
613 | #define closure_wait_event(list, cl, condition) \ | ||
614 | __closure_wait_event(list, cl, condition, closure_blocking(cl)) | ||
615 | |||
616 | #define closure_wait_event_async(list, cl, condition) \ | ||
617 | __closure_wait_event(list, cl, condition, false) | ||
618 | |||
619 | #define closure_wait_event_sync(list, cl, condition) \ | ||
620 | __closure_wait_event(list, cl, condition, true) | ||
621 | 482 | ||
622 | static inline void set_closure_fn(struct closure *cl, closure_fn *fn, | 483 | static inline void set_closure_fn(struct closure *cl, closure_fn *fn, |
623 | struct workqueue_struct *wq) | 484 | struct workqueue_struct *wq) |