author		Kent Overstreet <kmo@daterainc.com>	2013-12-20 18:55:23 -0500
committer	Kent Overstreet <kmo@daterainc.com>	2014-01-08 16:05:08 -0500
commit		1dd13c8d3c2d82e1b668d0b4754591291656542a (patch)
tree		1dd56303d3f2fdc59b4088a284445a25d98582b7
parent		cb7a583e6a6ace661a5890803e115d2292a293df (diff)

bcache: kill closure locking code

Also flesh out the documentation a bit.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
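For reference, the closure lifecycle that remains after this patch, per the documentation fleshed out below: a closure starts with a refcount of 1, outstanding async work takes and drops refs with closure_get()/closure_put(), and continue_at()/closure_return() hand off or drop the running ref. A minimal sketch; struct my_op, my_op_wq and the my_op_* functions are hypothetical, only the closure_* calls come from closure.h:

	#include "closure.h"	/* closure_init(), continue_at(), closure_return() */

	static struct workqueue_struct *my_op_wq;	/* hypothetical workqueue */

	/* hypothetical struct embedding a closure */
	struct my_op {
		struct closure		cl;
		/* ... state for the operation ... */
	};

	static void my_op_done(struct closure *cl)
	{
		struct my_op *op = container_of(cl, struct my_op, cl);

		pr_debug("op %p done\n", op);

		/* drops cl's ref on the parent passed to closure_init() */
		closure_return(cl);
	}

	static void my_op_start(struct my_op *op, struct closure *parent)
	{
		closure_init(&op->cl, parent);	/* refcount starts at 1 */

		/* ... submit async work; each outstanding piece holds a ref
		 * taken with closure_get() and drops it with closure_put() ... */

		/* once all refs are dropped, run my_op_done out of my_op_wq;
		 * NOTE: expands to a return */
		continue_at(&op->cl, my_op_done, my_op_wq);
	}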
-rw-r--r--	drivers/md/bcache/closure.c	|  90
-rw-r--r--	drivers/md/bcache/closure.h	| 346
2 files changed, 123 insertions(+), 313 deletions(-)
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index dfff2410322e..7a228de95fd7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,19 +11,6 @@
 
 #include "closure.h"
 
-#define CL_FIELD(type, field)					\
-	case TYPE_ ## type:					\
-	return &container_of(cl, struct type, cl)->field
-
-static struct closure_waitlist *closure_waitlist(struct closure *cl)
-{
-	switch (cl->type) {
-	CL_FIELD(closure_with_waitlist, wait);
-	default:
-		return NULL;
-	}
-}
-
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
 	int r = flags & CLOSURE_REMAINING_MASK;
@@ -42,17 +29,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 		closure_queue(cl);
 	} else {
 		struct closure *parent = cl->parent;
-		struct closure_waitlist *wait = closure_waitlist(cl);
 		closure_fn *destructor = cl->fn;
 
 		closure_debug_destroy(cl);
 
-		smp_mb();
-		atomic_set(&cl->remaining, -1);
-
-		if (wait)
-			closure_wake_up(wait);
-
 		if (destructor)
 			destructor(cl);
 
@@ -69,19 +49,18 @@ void closure_sub(struct closure *cl, int v)
 }
 EXPORT_SYMBOL(closure_sub);
 
+/**
+ * closure_put - decrement a closure's refcount
+ */
 void closure_put(struct closure *cl)
 {
 	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
 EXPORT_SYMBOL(closure_put);
 
-static void set_waiting(struct closure *cl, unsigned long f)
-{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-	cl->waiting_on = f;
-#endif
-}
-
+/**
+ * closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
 	struct llist_node *list;
@@ -106,27 +85,34 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
 		cl = container_of(reverse, struct closure, list);
 		reverse = llist_next(reverse);
 
-		set_waiting(cl, 0);
+		closure_set_waiting(cl, 0);
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
 }
 EXPORT_SYMBOL(__closure_wake_up);
 
-bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+/**
+ * closure_wait - add a closure to a waitlist
+ *
+ * @waitlist will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
 {
 	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
 		return false;
 
-	set_waiting(cl, _RET_IP_);
+	closure_set_waiting(cl, _RET_IP_);
 	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
-	llist_add(&cl->list, &list->list);
+	llist_add(&cl->list, &waitlist->list);
 
 	return true;
 }
 EXPORT_SYMBOL(closure_wait);
 
 /**
- * closure_sync() - sleep until a closure has nothing left to wait on
+ * closure_sync - sleep until a closure has nothing left to wait on
  *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
@@ -148,46 +134,6 @@ void closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_sync);
 
-/**
- * closure_trylock() - try to acquire the closure, without waiting
- * @cl: closure to lock
- *
- * Returns true if the closure was successfully locked.
- */
-bool closure_trylock(struct closure *cl, struct closure *parent)
-{
-	if (atomic_cmpxchg(&cl->remaining, -1,
-			   CLOSURE_REMAINING_INITIALIZER) != -1)
-		return false;
-
-	smp_mb();
-
-	cl->parent = parent;
-	if (parent)
-		closure_get(parent);
-
-	closure_set_ret_ip(cl);
-	closure_debug_create(cl);
-	return true;
-}
-EXPORT_SYMBOL(closure_trylock);
-
-void __closure_lock(struct closure *cl, struct closure *parent,
-		    struct closure_waitlist *wait_list)
-{
-	struct closure wait;
-	closure_init_stack(&wait);
-
-	while (1) {
-		if (closure_trylock(cl, parent))
-			return;
-
-		closure_wait_event(wait_list, &wait,
-				   atomic_read(&cl->remaining) == -1);
-	}
-}
-EXPORT_SYMBOL(__closure_lock);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 static LIST_HEAD(closure_list);
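The kernel-doc added above to closure_wait(), closure_sync() and closure_wake_up() is enough to reconstruct the synchronous wait pattern; this open-codes roughly what the closure_wait_event() macro removed from closure.h (below) did. A sketch, with my_condition and my_waitlist hypothetical; the recheck after closure_wait() avoids the lost-wakeup race the removed macro's comment describes:

	/* waiting side */
	struct closure cl;

	closure_init_stack(&cl);

	while (!my_condition) {
		closure_wait(&my_waitlist, &cl);	/* the list takes a ref on cl */

		if (my_condition)	/* recheck: the waker may have beaten us */
			break;

		closure_sync(&cl);	/* sleep until closure_wake_up() drops the ref */
	}

	closure_wake_up(&my_waitlist);	/* release our waiting ref, if still parked */
	closure_sync(&cl);

	/* signalling side: make the condition true, then wake */
	my_condition = true;
	closure_wake_up(&my_waitlist);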
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index d29b773b863f..7ef7461912be 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -72,30 +72,6 @@
  * closure - _always_ use continue_at(). Doing so consistently will help
  * eliminate an entire class of particularly pernicious races.
  *
- * For a closure to wait on an arbitrary event, we need to introduce waitlists:
- *
- *	struct closure_waitlist list;
- *	closure_wait_event(list, cl, condition);
- *	closure_wake_up(wait_list);
- *
- * These work analogously to wait_event() and wake_up() - except that instead of
- * operating on the current thread (for wait_event()) and lists of threads, they
- * operate on an explicit closure and lists of closures.
- *
- * Because it's a closure we can now wait either synchronously or
- * asynchronously. closure_wait_event() returns the current value of the
- * condition, and if it returned false continue_at() or closure_sync() can be
- * used to wait for it to become true.
- *
- * It's useful for waiting on things when you can't sleep in the context in
- * which you must check the condition (perhaps a spinlock held, or you might be
- * beneath generic_make_request() - in which case you can't sleep on IO).
- *
- * closure_wait_event() will wait either synchronously or asynchronously,
- * depending on whether the closure is in blocking mode or not. You can pick a
- * mode explicitly with closure_wait_event_sync() and
- * closure_wait_event_async(), which do just what you might expect.
- *
 * Lastly, you might have a wait list dedicated to a specific event, and have no
 * need for specifying the condition - you just want to wait until someone runs
 * closure_wake_up() on the appropriate wait list. In that case, just use
@@ -121,40 +97,6 @@
 * All this implies that a closure should typically be embedded in a particular
 * struct (which its refcount will normally control the lifetime of), and that
 * struct can very much be thought of as a stack frame.
- *
- * Locking:
- *
- * Closures are based on work items but they can be thought of as more like
- * threads - in that like threads and unlike work items they have a well
- * defined lifetime; they are created (with closure_init()) and eventually
- * complete after a continue_at(cl, NULL, NULL).
- *
- * Suppose you've got some larger structure with a closure embedded in it that's
- * used for periodically doing garbage collection. You only want one garbage
- * collection happening at a time, so the natural thing to do is protect it with
- * a lock. However, it's difficult to use a lock protecting a closure correctly
- * because the unlock should come after the last continue_at() (additionally, if
- * you're using the closure asynchronously a mutex won't work since a mutex has
- * to be unlocked by the same process that locked it).
- *
- * So to make it less error prone and more efficient, we also have the ability
- * to use closures as locks:
- *
- *	closure_init_unlocked();
- *	closure_trylock();
- *
- * That's all we need for trylock() - the last closure_put() implicitly unlocks
- * it for you. But for closure_lock(), we also need a wait list:
- *
- *	struct closure_with_waitlist frobnicator_cl;
- *
- *	closure_init_unlocked(&frobnicator_cl);
- *	closure_lock(&frobnicator_cl);
- *
- * A closure_with_waitlist embeds a closure and a wait list - much like struct
- * delayed_work embeds a work item and a timer_list. The important thing is, use
- * it exactly like you would a regular closure and closure_put() will magically
- * handle everything for you.
 */
 
 struct closure;
@@ -164,12 +106,6 @@ struct closure_waitlist {
 	struct llist_head	list;
 };
 
-enum closure_type {
-	TYPE_closure			= 0,
-	TYPE_closure_with_waitlist	= 1,
-	MAX_CLOSURE_TYPE		= 1,
-};
-
 enum closure_state {
 	/*
 	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
@@ -224,8 +160,6 @@ struct closure {
 
 	atomic_t		remaining;
 
-	enum closure_type	type;
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 #define CLOSURE_MAGIC_DEAD	0xc054dead
 #define CLOSURE_MAGIC_ALIVE	0xc054a11e
@@ -237,34 +171,12 @@ struct closure {
 #endif
 };
 
-struct closure_with_waitlist {
-	struct closure		cl;
-	struct closure_waitlist	wait;
-};
-
-extern unsigned invalid_closure_type(void);
-
-#define __CLOSURE_TYPE(cl, _t)					\
-	__builtin_types_compatible_p(typeof(cl), struct _t)	\
-		? TYPE_ ## _t :					\
-
-#define __closure_type(cl)					\
-(								\
-	__CLOSURE_TYPE(cl, closure)				\
-	__CLOSURE_TYPE(cl, closure_with_waitlist)		\
-	invalid_closure_type()					\
-)
-
 void closure_sub(struct closure *cl, int v);
 void closure_put(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void closure_sync(struct closure *cl);
 
-bool closure_trylock(struct closure *cl, struct closure *parent);
-void __closure_lock(struct closure *cl, struct closure *parent,
-		    struct closure_waitlist *wait_list);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 void closure_debug_init(void);
@@ -293,123 +205,97 @@ static inline void closure_set_ret_ip(struct closure *cl)
 #endif
 }
 
-static inline void closure_get(struct closure *cl)
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
 {
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-	BUG_ON((atomic_inc_return(&cl->remaining) &
-		CLOSURE_REMAINING_MASK) <= 1);
-#else
-	atomic_inc(&cl->remaining);
+	cl->waiting_on = f;
 #endif
 }
 
+static inline void __closure_end_sleep(struct closure *cl)
+{
+	__set_current_state(TASK_RUNNING);
+
+	if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+		atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+}
+
+static inline void __closure_start_sleep(struct closure *cl)
+{
+	closure_set_ip(cl);
+	cl->task = current;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+
+	if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+		atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+}
+
 static inline void closure_set_stopped(struct closure *cl)
 {
 	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
 }
 
-static inline bool closure_is_unlocked(struct closure *cl)
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+				  struct workqueue_struct *wq)
 {
-	return atomic_read(&cl->remaining) == -1;
+	BUG_ON(object_is_on_stack(cl));
+	closure_set_ip(cl);
+	cl->fn = fn;
+	cl->wq = wq;
+	/* between atomic_dec() in closure_put() */
+	smp_mb__before_atomic_dec();
 }
 
-static inline void do_closure_init(struct closure *cl, struct closure *parent,
-				   bool running)
+static inline void closure_queue(struct closure *cl)
 {
-	cl->parent = parent;
-	if (parent)
-		closure_get(parent);
-
-	if (running) {
-		closure_debug_create(cl);
-		atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+	struct workqueue_struct *wq = cl->wq;
+	if (wq) {
+		INIT_WORK(&cl->work, cl->work.func);
+		BUG_ON(!queue_work(wq, &cl->work));
 	} else
-		atomic_set(&cl->remaining, -1);
-
-	closure_set_ip(cl);
+		cl->fn(cl);
 }
 
-/*
- * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
- * the result of __closure_type() is thrown away, it's used merely for type
- * checking.
+/**
+ * closure_get - increment a closure's refcount
  */
-#define __to_internal_closure(cl)				\
-({								\
-	BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE);	\
-	(struct closure *) cl;					\
-})
-
-#define closure_init_type(cl, parent, running)			\
-do {								\
-	struct closure *_cl = __to_internal_closure(cl);	\
-	_cl->type = __closure_type(*(cl));			\
-	do_closure_init(_cl, parent, running);			\
-} while (0)
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+	BUG_ON((atomic_inc_return(&cl->remaining) &
+		CLOSURE_REMAINING_MASK) <= 1);
+#else
+	atomic_inc(&cl->remaining);
+#endif
+}
 
 /**
- * closure_init() - Initialize a closure, setting the refcount to 1
+ * closure_init - Initialize a closure, setting the refcount to 1
  * @cl: closure to initialize
 * @parent: parent of the new closure. cl will take a refcount on it for its
 *	lifetime; may be NULL.
 */
-#define closure_init(cl, parent)				\
-	closure_init_type(cl, parent, true)
-
-static inline void closure_init_stack(struct closure *cl)
+static inline void closure_init(struct closure *cl, struct closure *parent)
 {
 	memset(cl, 0, sizeof(struct closure));
-	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
-}
-
-/**
- * closure_init_unlocked() - Initialize a closure but leave it unlocked.
- * @cl: closure to initialize
- *
- * For when the closure will be used as a lock. The closure may not be used
- * until after a closure_lock() or closure_trylock().
- */
-#define closure_init_unlocked(cl)				\
-do {								\
-	memset((cl), 0, sizeof(*(cl)));				\
-	closure_init_type(cl, NULL, false);			\
-} while (0)
+	cl->parent = parent;
+	if (parent)
+		closure_get(parent);
 
-/**
- * closure_lock() - lock and initialize a closure.
- * @cl: the closure to lock
- * @parent: the new parent for this closure
- *
- * The closure must be of one of the types that has a waitlist (otherwise we
- * wouldn't be able to sleep on contention).
- *
- * @parent has exactly the same meaning as in closure_init(); if non null, the
- * closure will take a reference on @parent which will be released when it is
- * unlocked.
- */
-#define closure_lock(cl, parent)				\
-	__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 
-static inline void __closure_end_sleep(struct closure *cl)
-{
-	__set_current_state(TASK_RUNNING);
-
-	if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
-		atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+	closure_debug_create(cl);
+	closure_set_ip(cl);
 }
 
-static inline void __closure_start_sleep(struct closure *cl)
+static inline void closure_init_stack(struct closure *cl)
 {
-	closure_set_ip(cl);
-	cl->task = current;
-	set_current_state(TASK_UNINTERRUPTIBLE);
-
-	if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
-		atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
 }
 
 /**
- * closure_wake_up() - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list.
 */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
@@ -417,69 +303,19 @@ static inline void closure_wake_up(struct closure_waitlist *list)
 	__closure_wake_up(list);
 }
 
-/*
- * Wait on an event, synchronously or asynchronously - analogous to wait_event()
- * but for closures.
- *
- * The loop is oddly structured so as to avoid a race; we must check the
- * condition again after we've added ourself to the waitlist. We know if we were
- * already on the waitlist because closure_wait() returns false; thus, we only
- * schedule or break if closure_wait() returns false. If it returns true, we
- * just loop again - rechecking the condition.
- *
- * The __closure_wake_up() is necessary because we may race with the event
- * becoming true; i.e. we see event false -> wait -> recheck condition, but the
- * thread that made the event true may have called closure_wake_up() before we
- * added ourself to the wait list.
- *
- * We have to call closure_sync() at the end instead of just
- * __closure_end_sleep() because a different thread might've called
- * closure_wake_up() before us and gotten preempted before they dropped the
- * refcount on our closure. If this was a stack allocated closure, that would be
- * bad.
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * NOTE: This macro expands to a return in the calling function!
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
 */
-#define closure_wait_event(list, cl, condition)			\
-({								\
-	typeof(condition) ret;					\
-								\
-	while (1) {						\
-		ret = (condition);				\
-		if (ret) {					\
-			__closure_wake_up(list);		\
-			closure_sync(cl);			\
-			break;					\
-		}						\
-								\
-		__closure_start_sleep(cl);			\
-								\
-		if (!closure_wait(list, cl))			\
-			schedule();				\
-	}							\
-								\
-	ret;							\
-})
-
-static inline void closure_queue(struct closure *cl)
-{
-	struct workqueue_struct *wq = cl->wq;
-	if (wq) {
-		INIT_WORK(&cl->work, cl->work.func);
-		BUG_ON(!queue_work(wq, &cl->work));
-	} else
-		cl->fn(cl);
-}
-
-static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
-				  struct workqueue_struct *wq)
-{
-	BUG_ON(object_is_on_stack(cl));
-	closure_set_ip(cl);
-	cl->fn = fn;
-	cl->wq = wq;
-	/* between atomic_dec() in closure_put() */
-	smp_mb__before_atomic_dec();
-}
-
 #define continue_at(_cl, _fn, _wq)				\
 do {								\
 	set_closure_fn(_cl, _fn, _wq);				\
@@ -487,8 +323,28 @@ do {								\
 	return;							\
 } while (0)
 
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
 #define closure_return(_cl)	continue_at((_cl), NULL, NULL)
 
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * NOTE: like continue_at(), this macro expands to a return in the caller!
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
 #define continue_at_nobarrier(_cl, _fn, _wq)			\
 do {								\
 	set_closure_fn(_cl, _fn, _wq);				\
@@ -496,6 +352,15 @@ do {								\
 	return;							\
 } while (0)
 
+/**
+ * closure_return_with_destructor - finish execution of a closure, with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
 #define closure_return_with_destructor(_cl, _destructor)	\
 do {								\
 	set_closure_fn(_cl, _destructor, NULL);			\
@@ -503,6 +368,13 @@ do {								\
 	return;							\
 } while (0)
 
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
 static inline void closure_call(struct closure *cl, closure_fn fn,
 				struct workqueue_struct *wq,
 				struct closure *parent)
@@ -511,12 +383,4 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
 	continue_at_nobarrier(cl, fn, wq);
 }
 
-static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
-					struct workqueue_struct *wq,
-					struct closure *parent)
-{
-	if (closure_trylock(cl, parent))
-		continue_at_nobarrier(cl, fn, wq);
-}
-
 #endif /* _LINUX_CLOSURE_H */
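With closure_wait_event() and the locking primitives gone, the asynchronous wait case the deleted header comment mentioned (beneath generic_make_request(), under a spinlock, etc.) is expressed directly with closure_wait() plus continue_at(). A sketch, assuming hypothetical my_condition, my_waitlist and my_wq; the recheck after closure_wait() guards against the lost-wakeup race the removed closure_wait_event() comment described:

	static void my_fn(struct closure *cl)
	{
		if (!my_condition) {
			/* park cl on the list; we can't sleep in this context */
			closure_wait(&my_waitlist, cl);

			if (!my_condition) {
				/* closure_wake_up(&my_waitlist) will requeue
				 * my_fn out of my_wq; expands to a return */
				continue_at(cl, my_fn, my_wq);
			}

			/* raced: the event happened between the check and the
			 * wait - wake the list so our own waiting ref is
			 * released and fall through */
			closure_wake_up(&my_waitlist);
		}

		/* my_condition now holds; do the actual work... */
		closure_return(cl);
	}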