author	Kent Overstreet <kmo@daterainc.com>	2013-12-16 18:27:25 -0500
committer	Kent Overstreet <kmo@daterainc.com>	2014-01-08 16:05:08 -0500
commit	cb7a583e6a6ace661a5890803e115d2292a293df
tree	be695468938237320560fc75b0f4b64cbe60117a /drivers/md
parent	a5ae4300c15c778722c139953c825cd24d6ff517
bcache: kill closure locking usage
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/bcache.h	 9
-rw-r--r--	drivers/md/bcache/btree.c	52
-rw-r--r--	drivers/md/bcache/btree.h	 3
-rw-r--r--	drivers/md/bcache/debug.c	 7
-rw-r--r--	drivers/md/bcache/journal.c	27
-rw-r--r--	drivers/md/bcache/journal.h	 1
-rw-r--r--	drivers/md/bcache/super.c	54
7 files changed, 98 insertions(+), 55 deletions(-)
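Note: the conversion follows one pattern throughout -- each closure_with_waitlist becomes a plain closure guarded by a semaphore initialized to 1, closure_lock() becomes down() followed by closure_init(), and closure_return() becomes closure_return_with_destructor() with a destructor that does the up(). A minimal sketch of that pattern follows, using a hypothetical struct foo / foo_write(); the patch itself applies it to sb_write, uuid_write and the btree io closure below.

/* Sketch only -- illustrates the conversion pattern, not code from the patch.
 * struct foo and foo_write() are hypothetical stand-ins.
 */
struct foo {
        struct closure   io;       /* was: struct closure_with_waitlist io; */
        struct semaphore io_mutex; /* new: sema_init(&f->io_mutex, 1) at setup time */
};

static void foo_write_unlock(struct closure *cl)
{
        struct foo *f = container_of(cl, struct foo, io);

        up(&f->io_mutex);          /* release once the closure has run down */
}

static void foo_write(struct foo *f, struct closure *parent)
{
        struct closure *cl = &f->io;

        down(&f->io_mutex);        /* was: closure_lock(&f->io, parent); */
        closure_init(cl, parent);

        /* ... closure_get(), submit I/O, closure_put() from the endio handler ... */

        closure_return_with_destructor(cl, foo_write_unlock); /* was: closure_return(cl); */
}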
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index dbdbca5a9591..9d062bc56261 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -309,7 +309,8 @@ struct cached_dev {
         struct cache_sb sb;
         struct bio sb_bio;
         struct bio_vec sb_bv[1];
-        struct closure_with_waitlist sb_write;
+        struct closure sb_write;
+        struct semaphore sb_write_mutex;
 
         /* Refcount on the cache set. Always nonzero when we're caching. */
         atomic_t count;
@@ -514,7 +515,8 @@ struct cache_set {
         uint64_t cached_dev_sectors;
         struct closure caching;
 
-        struct closure_with_waitlist sb_write;
+        struct closure sb_write;
+        struct semaphore sb_write_mutex;
 
         mempool_t *search;
         mempool_t *bio_meta;
@@ -635,7 +637,8 @@ struct cache_set {
         unsigned nr_uuids;
         struct uuid_entry *uuids;
         BKEY_PADDED(uuid_bucket);
-        struct closure_with_waitlist uuid_write;
+        struct closure uuid_write;
+        struct semaphore uuid_write_mutex;
 
         /*
          * A btree node on disk could have too many bsets for an iterator to fit
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index aaa87b3be9ef..101231f0f399 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -340,9 +340,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
         w->journal = NULL;
 }
 
+static void btree_node_write_unlock(struct closure *cl)
+{
+        struct btree *b = container_of(cl, struct btree, io);
+
+        up(&b->io_mutex);
+}
+
 static void __btree_node_write_done(struct closure *cl)
 {
-        struct btree *b = container_of(cl, struct btree, io.cl);
+        struct btree *b = container_of(cl, struct btree, io);
         struct btree_write *w = btree_prev_write(b);
 
         bch_bbio_free(b->bio, b->c);
@@ -353,12 +360,12 @@ static void __btree_node_write_done(struct closure *cl)
                 queue_delayed_work(btree_io_wq, &b->work,
                                    msecs_to_jiffies(30000));
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, btree_node_write_unlock);
 }
 
 static void btree_node_write_done(struct closure *cl)
 {
-        struct btree *b = container_of(cl, struct btree, io.cl);
+        struct btree *b = container_of(cl, struct btree, io);
         struct bio_vec *bv;
         int n;
 
@@ -371,7 +378,7 @@ static void btree_node_write_done(struct closure *cl)
 static void btree_node_write_endio(struct bio *bio, int error)
 {
         struct closure *cl = bio->bi_private;
-        struct btree *b = container_of(cl, struct btree, io.cl);
+        struct btree *b = container_of(cl, struct btree, io);
 
         if (error)
                 set_btree_node_io_error(b);
@@ -382,7 +389,7 @@ static void btree_node_write_endio(struct bio *bio, int error)
 
 static void do_btree_node_write(struct btree *b)
 {
-        struct closure *cl = &b->io.cl;
+        struct closure *cl = &b->io;
         struct bset *i = b->sets[b->nsets].data;
         BKEY_PADDED(key) k;
 
@@ -435,7 +442,7 @@ static void do_btree_node_write(struct btree *b)
                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
                 closure_sync(cl);
-                __btree_node_write_done(cl);
+                continue_at_nobarrier(cl, __btree_node_write_done, NULL);
         }
 }
 
@@ -454,7 +461,8 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
         cancel_delayed_work(&b->work);
 
         /* If caller isn't waiting for write, parent refcount is cache set */
-        closure_lock(&b->io, parent ?: &b->c->cl);
+        down(&b->io_mutex);
+        closure_init(&b->io, parent ?: &b->c->cl);
 
         clear_bit(BTREE_NODE_dirty, &b->flags);
         change_bit(BTREE_NODE_write_idx, &b->flags);
@@ -554,7 +562,8 @@ static void mca_reinit(struct btree *b)
 static void mca_data_free(struct btree *b)
 {
         struct bset_tree *t = b->sets;
-        BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+        BUG_ON(b->io_mutex.count != 1);
 
         if (bset_prev_bytes(b) < PAGE_SIZE)
                 kfree(t->prev);
@@ -635,7 +644,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
         INIT_LIST_HEAD(&b->list);
         INIT_DELAYED_WORK(&b->work, btree_node_write_work);
         b->c = c;
-        closure_init_unlocked(&b->io);
+        sema_init(&b->io_mutex, 1);
 
         mca_data_alloc(b, k, gfp);
         return b;
@@ -653,22 +662,29 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
 
         BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
 
-        if (b->page_order < min_order ||
-            (!flush &&
-             (btree_node_dirty(b) ||
-              atomic_read(&b->io.cl.remaining) != -1))) {
-                rw_unlock(true, b);
-                return -ENOMEM;
+        if (b->page_order < min_order)
+                goto out_unlock;
+
+        if (!flush) {
+                if (btree_node_dirty(b))
+                        goto out_unlock;
+
+                if (down_trylock(&b->io_mutex))
+                        goto out_unlock;
+                up(&b->io_mutex);
         }
 
         if (btree_node_dirty(b))
                 bch_btree_node_write_sync(b);
 
         /* wait for any in flight btree write */
-        closure_wait_event(&b->io.wait, &cl,
-                           atomic_read(&b->io.cl.remaining) == -1);
+        down(&b->io_mutex);
+        up(&b->io_mutex);
 
         return 0;
+out_unlock:
+        rw_unlock(true, b);
+        return -ENOMEM;
 }
 
 static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -918,7 +934,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
         if (!b->sets->data)
                 goto err;
 out:
-        BUG_ON(!closure_is_unlocked(&b->io.cl));
+        BUG_ON(b->io_mutex.count != 1);
 
         bkey_copy(&b->key, k);
         list_move(&b->list, &c->btree_cache);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 767e75570896..d68af7442f70 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -143,7 +143,8 @@ struct btree {
         struct bset_tree sets[MAX_BSETS];
 
         /* For outstanding btree writes, used as a lock - protects write_idx */
-        struct closure_with_waitlist io;
+        struct closure io;
+        struct semaphore io_mutex;
 
         struct list_head list;
         struct delayed_work work;
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 03cb4d114e16..fab3767d6d28 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -127,9 +127,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         if (!b->c->verify)
                 return;
 
-        closure_wait_event(&b->io.wait, &cl,
-                           atomic_read(&b->io.cl.remaining) == -1);
-
+        down(&b->io_mutex);
         mutex_lock(&b->c->verify_lock);
 
         bkey_copy(&v->key, &b->key);
@@ -137,8 +135,6 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         v->level = b->level;
 
         bch_btree_node_read(v);
-        closure_wait_event(&v->io.wait, &cl,
-                           atomic_read(&b->io.cl.remaining) == -1);
 
         if (new->keys != v->sets[0].data->keys ||
             memcmp(new->start,
@@ -167,6 +163,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         }
 
         mutex_unlock(&b->c->verify_lock);
+        up(&b->io_mutex);
 }
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ad687285c2df..9d32d5790822 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -564,6 +564,14 @@ static void journal_write_done(struct closure *cl)
         continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
+static void journal_write_unlock(struct closure *cl)
+{
+        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+        c->journal.io_in_flight = 0;
+        spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
         __releases(c->journal.lock)
 {
@@ -578,15 +586,7 @@ static void journal_write_unlocked(struct closure *cl)
         bio_list_init(&list);
 
         if (!w->need_write) {
-                /*
-                 * XXX: have to unlock closure before we unlock journal lock,
-                 * else we race with bch_journal(). But this way we race
-                 * against cache set unregister. Doh.
-                 */
-                set_closure_fn(cl, NULL, NULL);
-                closure_sub(cl, CLOSURE_RUNNING + 1);
-                spin_unlock(&c->journal.lock);
-                return;
+                closure_return_with_destructor(cl, journal_write_unlock);
         } else if (journal_full(&c->journal)) {
                 journal_reclaim(c);
                 spin_unlock(&c->journal.lock);
@@ -662,10 +662,12 @@ static void journal_try_write(struct cache_set *c)
 
         w->need_write = true;
 
-        if (closure_trylock(cl, &c->cl))
-                journal_write_unlocked(cl);
-        else
+        if (!c->journal.io_in_flight) {
+                c->journal.io_in_flight = 1;
+                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+        } else {
                 spin_unlock(&c->journal.lock);
+        }
 }
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -793,7 +795,6 @@ int bch_journal_alloc(struct cache_set *c)
 {
         struct journal *j = &c->journal;
 
-        closure_init_unlocked(&j->io);
         spin_lock_init(&j->lock);
         INIT_DELAYED_WORK(&j->work, journal_write_work);
 
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index a6472fda94b2..9180c4465075 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -104,6 +104,7 @@ struct journal {
         /* used when waiting because the journal was full */
         struct closure_waitlist wait;
         struct closure io;
+        int io_in_flight;
         struct delayed_work work;
 
         /* Number of blocks free in the bucket(s) we're currently writing to */
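Note: the journal path cannot use the same sleeping semaphore, since journal_try_write() is entered with c->journal.lock held; instead the patch adds an io_in_flight flag that is tested and set under that spinlock and cleared again in the closure destructor, which also drops the lock. That replaces the old ordering hack (unlocking the closure before the journal lock) whose XXX comment noted a race against cache set unregister. A condensed sketch of that piece, reusing the names from the patch (the real journal_try_write() does more than shown here):

/* Condensed sketch of the journal-side conversion -- not the full patch. */
static void journal_write_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        c->journal.io_in_flight = 0;    /* the next caller may start a write */
        spin_unlock(&c->journal.lock);
}

static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;

        c->journal.cur->need_write = true;

        if (!c->journal.io_in_flight) {         /* was: closure_trylock(cl, &c->cl) */
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                spin_unlock(&c->journal.lock);
        }
}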
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 93d593f957f6..b057676fc67d 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -225,7 +225,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
         struct cached_dev *dc = bio->bi_private;
         /* XXX: error checking */
 
-        closure_put(&dc->sb_write.cl);
+        closure_put(&dc->sb_write);
 }
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -263,12 +263,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
         submit_bio(REQ_WRITE, bio);
 }
 
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+        up(&dc->sb_write_mutex);
+}
+
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 {
-        struct closure *cl = &dc->sb_write.cl;
+        struct closure *cl = &dc->sb_write;
         struct bio *bio = &dc->sb_bio;
 
-        closure_lock(&dc->sb_write, parent);
+        down(&dc->sb_write_mutex);
+        closure_init(cl, parent);
 
         bio_reset(bio);
         bio->bi_bdev = dc->bdev;
@@ -278,7 +286,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
         closure_get(cl);
         __write_super(&dc->sb, bio);
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
 static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +294,24 @@ static void write_super_endio(struct bio *bio, int error)
         struct cache *ca = bio->bi_private;
 
         bch_count_io_errors(ca, error, "writing superblock");
-        closure_put(&ca->set->sb_write.cl);
+        closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+        struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+        up(&c->sb_write_mutex);
 }
 
 void bcache_write_super(struct cache_set *c)
 {
-        struct closure *cl = &c->sb_write.cl;
+        struct closure *cl = &c->sb_write;
         struct cache *ca;
         unsigned i;
 
-        closure_lock(&c->sb_write, &c->cl);
+        down(&c->sb_write_mutex);
+        closure_init(cl, &c->cl);
 
         c->sb.seq++;
 
@@ -317,7 +333,7 @@ void bcache_write_super(struct cache_set *c)
                 __write_super(&ca->sb, bio);
         }
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
 
 /* UUID io */
@@ -325,23 +341,31 @@ void bcache_write_super(struct cache_set *c)
 static void uuid_endio(struct bio *bio, int error)
 {
         struct closure *cl = bio->bi_private;
-        struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
         cache_set_err_on(error, c, "accessing uuids");
         bch_bbio_free(bio, c);
         closure_put(cl);
 }
 
+static void uuid_io_unlock(struct closure *cl)
+{
+        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+        up(&c->uuid_write_mutex);
+}
+
 static void uuid_io(struct cache_set *c, unsigned long rw,
                     struct bkey *k, struct closure *parent)
 {
-        struct closure *cl = &c->uuid_write.cl;
+        struct closure *cl = &c->uuid_write;
         struct uuid_entry *u;
         unsigned i;
         char buf[80];
 
         BUG_ON(!parent);
-        closure_lock(&c->uuid_write, parent);
+        down(&c->uuid_write_mutex);
+        closure_init(cl, parent);
 
         for (i = 0; i < KEY_PTRS(k); i++) {
                 struct bio *bio = bch_bbio_alloc(c);
@@ -368,7 +392,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                  u - c->uuids, u->uuid, u->label,
                  u->first_reg, u->last_reg, u->invalidated);
 
-        closure_return(cl);
+        closure_return_with_destructor(cl, uuid_io_unlock);
 }
 
 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
@@ -1098,7 +1122,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
         set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
         kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
         INIT_WORK(&dc->detach, cached_dev_detach_finish);
-        closure_init_unlocked(&dc->sb_write);
+        sema_init(&dc->sb_write_mutex, 1);
         INIT_LIST_HEAD(&dc->io_lru);
         spin_lock_init(&dc->io_lock);
         bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1454,11 +1478,11 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 
         c->sort_crit_factor = int_sqrt(c->btree_pages);
 
-        closure_init_unlocked(&c->sb_write);
+        sema_init(&c->sb_write_mutex, 1);
         mutex_init(&c->bucket_lock);
         init_waitqueue_head(&c->try_wait);
         init_waitqueue_head(&c->bucket_wait);
-        closure_init_unlocked(&c->uuid_write);
+        sema_init(&c->uuid_write_mutex, 1);
         mutex_init(&c->sort_lock);
 
         spin_lock_init(&c->sort_time.lock);