author     Kent Overstreet <kmo@daterainc.com>    2013-10-24 20:19:26 -0400
committer  Kent Overstreet <kmo@daterainc.com>    2013-11-11 00:56:04 -0500
commit     72a44517f3ca3725dc86081d105457df46448679 (patch)
tree       247b859af6c4377ab1ea90fd1fd4e64278ccdbbd /drivers/md
parent     35fcd848d72683141052aa9880542461577f2dbe (diff)
bcache: Convert gc to a kthread
We needed a dedicated rescuer workqueue for gc anyway... and gc was
conceptually a dedicated thread, just one that wasn't running all the
time. Switch it to a dedicated thread to make the code a bit more
straightforward.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
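
The conversion follows the kernel's standard pattern for an on-demand worker thread: the thread does a round of work, marks itself TASK_INTERRUPTIBLE, and sleeps until a producer calls wake_up_process() on it or teardown calls kthread_stop(). A minimal sketch of that pattern (demand_worker() and do_work() are illustrative stand-ins, not bcache functions):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int demand_worker(void *arg)
    {
            while (1) {
                    do_work(arg);           /* stand-in for the real work */

                    /*
                     * Set the sleep state before checking the stop flag so
                     * a wakeup arriving between the check and schedule()
                     * is not lost.
                     */
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (kthread_should_stop())
                            break;

                    schedule();
            }

            __set_current_state(TASK_RUNNING);
            return 0;
    }

Producers then nudge the thread with wake_up_process(task), which is exactly what the new wake_up_gc() helper in the patch below does.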
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/alloc.c    |  6
-rw-r--r--  drivers/md/bcache/bcache.h   |  9
-rw-r--r--  drivers/md/bcache/btree.c    | 50
-rw-r--r--  drivers/md/bcache/btree.h    | 10
-rw-r--r--  drivers/md/bcache/movinggc.c | 35
-rw-r--r--  drivers/md/bcache/request.c  |  2
-rw-r--r--  drivers/md/bcache/super.c    | 20
-rw-r--r--  drivers/md/bcache/sysfs.c    |  2
8 files changed, 74 insertions, 60 deletions
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 1b64e662e81b..b9bd5866055d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -210,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca)
 	 * multiple times when it can't do anything
 	 */
 	ca->invalidate_needs_gc = 1;
-	bch_queue_gc(ca->set);
+	wake_up_gc(ca->set);
 	return;
 }

@@ -235,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca)

 	if (++checked >= ca->sb.nbuckets) {
 		ca->invalidate_needs_gc = 1;
-		bch_queue_gc(ca->set);
+		wake_up_gc(ca->set);
 		return;
 	}
 }
@@ -260,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca)

 	if (++checked >= ca->sb.nbuckets / 2) {
 		ca->invalidate_needs_gc = 1;
-		bch_queue_gc(ca->set);
+		wake_up_gc(ca->set);
 		return;
 	}
 }
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3520748bc27..09410eb07d82 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -773,7 +773,7 @@ struct cache_set {
 	struct gc_stat		gc_stats;
 	size_t			nbuckets;

-	struct closure_with_waitlist gc;
+	struct task_struct	*gc_thread;
 	/* Where in the btree gc currently is */
 	struct bkey		gc_done;

@@ -786,11 +786,10 @@ struct cache_set {
 	/* Counts how many sectors bio_insert has added to the cache */
 	atomic_t		sectors_to_gc;

-	struct closure		moving_gc;
-	struct closure_waitlist	moving_gc_wait;
+	wait_queue_head_t	moving_gc_wait;
 	struct keybuf		moving_gc_keys;
 	/* Number of moving GC bios in flight */
-	atomic_t		in_flight;
+	struct semaphore	moving_in_flight;

 	struct btree		*root;

@@ -1176,7 +1175,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);
 void bch_prio_write(struct cache *);
 void bch_write_bdev_super(struct cached_dev *, struct closure *);

-extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern struct workqueue_struct *bcache_wq;
 extern const char * const bch_cache_modes[];
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 935d90df397b..17bfd87fc8f4 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -28,7 +28,9 @@

 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/freezer.h>
 #include <linux/hash.h>
+#include <linux/kthread.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
 #include <linux/rcupdate.h>
@@ -105,7 +107,6 @@ static const char *op_type(struct btree_op *op)
 #define PTR_HASH(c, k)						\
 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

-struct workqueue_struct *bch_gc_wq;
 static struct workqueue_struct *btree_io_wq;

 void bch_btree_op_init_stack(struct btree_op *op)
@@ -732,12 +733,9 @@ int bch_btree_cache_alloc(struct cache_set *c)
 {
 	unsigned i;

-	/* XXX: doesn't check for errors */
-
-	closure_init_unlocked(&c->gc);
-
 	for (i = 0; i < mca_reserve(c); i++)
-		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+			return -ENOMEM;

 	list_splice_init(&c->btree_cache,
 			 &c->btree_cache_freeable);
@@ -1456,9 +1454,8 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 	return available;
 }

-static void bch_btree_gc(struct closure *cl)
+static void bch_btree_gc(struct cache_set *c)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
 	int ret;
 	unsigned long available;
 	struct gc_stat stats;
@@ -1483,7 +1480,7 @@ static void bch_btree_gc(struct closure *cl)

 	if (ret) {
 		pr_warn("gc failed!");
-		continue_at(cl, bch_btree_gc, bch_gc_wq);
+		return;
 	}

 	/* Possibly wait for new UUIDs or whatever to hit disk */
@@ -1505,12 +1502,35 @@

 	trace_bcache_gc_end(c);

-	continue_at(cl, bch_moving_gc, bch_gc_wq);
+	bch_moving_gc(c);
+}
+
+static int bch_gc_thread(void *arg)
+{
+	struct cache_set *c = arg;
+
+	while (1) {
+		bch_btree_gc(c);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+
+		try_to_freeze();
+		schedule();
+	}
+
+	return 0;
 }

-void bch_queue_gc(struct cache_set *c)
+int bch_gc_thread_start(struct cache_set *c)
 {
-	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	if (IS_ERR(c->gc_thread))
+		return PTR_ERR(c->gc_thread);
+
+	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
+	return 0;
 }

 /* Initial partial gc */
@@ -2480,14 +2500,12 @@ void bch_btree_exit(void)
 {
 	if (btree_io_wq)
 		destroy_workqueue(btree_io_wq);
-	if (bch_gc_wq)
-		destroy_workqueue(bch_gc_wq);
 }

 int __init bch_btree_init(void)
 {
-	if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
-	    !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
+	if (!btree_io_wq)
 		return -ENOMEM;

 	return 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index d691d954730e..fa9641aaed39 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -388,12 +388,18 @@ int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);

 int bch_btree_search_recurse(struct btree *, struct btree_op *);

-void bch_queue_gc(struct cache_set *);
+int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
+void bch_moving_gc(struct cache_set *);
 int bch_btree_check(struct cache_set *, struct btree_op *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);

+static inline void wake_up_gc(struct cache_set *c)
+{
+	if (c->gc_thread)
+		wake_up_process(c->gc_thread);
+}
+
 void bch_keybuf_init(struct keybuf *);
 void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
 		       keybuf_pred_fn *);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 2c42377a65aa..6ba050456ec8 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -57,8 +57,7 @@ static void write_moving_finish(struct closure *cl)

 	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);

-	atomic_dec_bug(&io->s.op.c->in_flight);
-	closure_wake_up(&io->s.op.c->moving_gc_wait);
+	up(&io->s.op.c->moving_in_flight);

 	closure_return_with_destructor(cl, moving_io_destructor);
 }
@@ -113,7 +112,7 @@ static void write_moving(struct closure *cl)
 		bch_data_insert(&s->op.cl);
 	}

-	continue_at(cl, write_moving_finish, bch_gc_wq);
+	continue_at(cl, write_moving_finish, system_wq);
 }

 static void read_moving_submit(struct closure *cl)
@@ -124,15 +123,17 @@ static void read_moving_submit(struct closure *cl)

 	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);

-	continue_at(cl, write_moving, bch_gc_wq);
+	continue_at(cl, write_moving, system_wq);
 }

-static void read_moving(struct closure *cl)
+static void read_moving(struct cache_set *c)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
 	struct keybuf_key *w;
 	struct moving_io *io;
 	struct bio *bio;
+	struct closure cl;
+
+	closure_init_stack(&cl);

 	/* XXX: if we error, background writeback could stall indefinitely */

@@ -164,13 +165,8 @@ static void read_moving(struct closure *cl)

 		trace_bcache_gc_copy(&w->key);

-		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
-
-		if (atomic_inc_return(&c->in_flight) >= 64) {
-			closure_wait_event(&c->moving_gc_wait, cl,
-					   atomic_read(&c->in_flight) < 64);
-			continue_at(cl, read_moving, bch_gc_wq);
-		}
+		down(&c->moving_in_flight);
+		closure_call(&io->s.cl, read_moving_submit, NULL, &cl);
 	}

 	if (0) {
@@ -180,7 +176,7 @@ err:		if (!IS_ERR_OR_NULL(w->private))
 		bch_keybuf_del(&c->moving_gc_keys, w);
 	}

-	closure_return(cl);
+	closure_sync(&cl);
 }

 static bool bucket_cmp(struct bucket *l, struct bucket *r)
@@ -193,15 +189,14 @@ static unsigned bucket_heap_top(struct cache *ca)
 	return GC_SECTORS_USED(heap_peek(&ca->heap));
 }

-void bch_moving_gc(struct closure *cl)
+void bch_moving_gc(struct cache_set *c)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
 	struct cache *ca;
 	struct bucket *b;
 	unsigned i;

 	if (!c->copy_gc_enabled)
-		closure_return(cl);
+		return;

 	mutex_lock(&c->bucket_lock);

@@ -242,13 +237,11 @@ void bch_moving_gc(struct closure *cl)

 	c->moving_gc_keys.last_scanned = ZERO_KEY;

-	closure_init(&c->moving_gc, cl);
-	read_moving(&c->moving_gc);
-
-	closure_return(cl);
+	read_moving(c);
 }

 void bch_moving_init_cache_set(struct cache_set *c)
 {
 	bch_keybuf_init(&c->moving_gc_keys);
+	sema_init(&c->moving_in_flight, 64);
 }
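
A note on the throttling change above: the old code counted in-flight moving-GC bios with an atomic and parked the closure on a waitlist once 64 were outstanding; the new code gets the same bound from a counting semaphore initialized to 64, with down() before each submission and up() on completion. Schematically (the names and the issue_async() helper are illustrative, not from the patch):

    #include <linux/semaphore.h>

    static struct semaphore moving_in_flight;

    static void throttle_init(void)
    {
            sema_init(&moving_in_flight, 64);  /* allow 64 outstanding bios */
    }

    static void submit_moving_io(void *io)
    {
            down(&moving_in_flight);  /* blocks while 64 are in flight */
            issue_async(io);          /* stand-in for the real submission */
    }

    /* Completion path: release a slot, unblocking one waiting submitter. */
    static void moving_io_done(void *io)
    {
            up(&moving_in_flight);
    }

Blocking in down() is acceptable here because read_moving() now runs in the gc thread's own process context rather than as a closure on a workqueue.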
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 26d18f4bf4a0..f779eb420d69 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -520,7 +520,7 @@ static void bch_data_insert_start(struct closure *cl)

 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
-		bch_queue_gc(op->c);
+		wake_up_gc(op->c);
 	}

 	/*
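
The request.c hunk is the gc trigger on the write path: every cache insert subtracts its sector count from sectors_to_gc, and the writer that drives the counter negative resets it and wakes the gc thread. The idiom in isolation looks like this (GC_TRIGGER_SECTORS is a hypothetical threshold; bcache's real reset lives in set_gc_sectors()):

    #include <linux/atomic.h>
    #include <linux/sched.h>

    #define GC_TRIGGER_SECTORS	(1 << 20)	/* hypothetical threshold */

    static atomic_t sectors_to_gc;

    static void note_cached_write(unsigned sectors, struct task_struct *gc)
    {
            /*
             * Many writers decrement concurrently; whoever sees the counter
             * go negative resets it and wakes gc. A duplicate wakeup from a
             * racing writer is harmless.
             */
            if (atomic_sub_return(sectors, &sectors_to_gc) < 0) {
                    atomic_set(&sectors_to_gc, GC_TRIGGER_SECTORS);
                    wake_up_process(gc);
            }
    }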
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 84398a82fbe3..f89e2296bde1 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1342,6 +1342,9 @@ static void cache_set_flush(struct closure *cl)
 	kobject_put(&c->internal);
 	kobject_del(&c->kobj);

+	if (c->gc_thread)
+		kthread_stop(c->gc_thread);
+
 	if (!IS_ERR_OR_NULL(c->root))
 		list_add(&c->root->list, &c->btree_cache);

@@ -1579,8 +1582,6 @@ static void run_cache_set(struct cache_set *c)
 		bch_journal_replay(c, &journal, &op);
 	} else {
 		pr_notice("invalidating existing data");
-		/* Don't want invalidate_buckets() to queue a gc yet */
-		closure_lock(&c->gc, NULL);

 		for_each_cache(ca, c, i) {
 			unsigned j;
@@ -1606,12 +1607,12 @@ static void run_cache_set(struct cache_set *c)

 		err = "cannot allocate new UUID bucket";
 		if (__uuid_write(c))
-			goto err_unlock_gc;
+			goto err;

 		err = "cannot allocate new btree root";
 		c->root = bch_btree_node_alloc(c, 0);
 		if (IS_ERR_OR_NULL(c->root))
-			goto err_unlock_gc;
+			goto err;

 		bkey_copy_key(&c->root->key, &MAX_KEY);
 		bch_btree_node_write(c->root, &op.cl);
@@ -1628,12 +1629,12 @@ static void run_cache_set(struct cache_set *c)

 		bch_journal_next(&c->journal);
 		bch_journal_meta(c, &op.cl);
-
-		/* Unlock */
-		closure_set_stopped(&c->gc.cl);
-		closure_put(&c->gc.cl);
 	}

+	err = "error starting gc thread";
+	if (bch_gc_thread_start(c))
+		goto err;
+
 	closure_sync(&op.cl);
 	c->sb.last_mount = get_seconds();
 	bcache_write_super(c);
@@ -1644,9 +1645,6 @@ static void run_cache_set(struct cache_set *c)
 	flash_devs_run(c);

 	return;
-err_unlock_gc:
-	closure_set_stopped(&c->gc.cl);
-	closure_put(&c->gc.cl);
 err:
 	closure_sync(&op.cl);
 	/* XXX: test this, it's broken */
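
Teardown in cache_set_flush() is now just a NULL-checked kthread_stop(): it sets the thread's should-stop flag, wakes it, and blocks until the thread function returns, pairing with the kthread_should_stop() check in bch_gc_thread(). As a minimal sketch (assuming the thread was created as in btree.c above):

    #include <linux/kthread.h>

    static void stop_gc_thread(struct task_struct *gc_thread)
    {
            if (gc_thread)
                    kthread_stop(gc_thread);  /* wake, then wait for exit */
    }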
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index b3a66f17231d..ab286b9b5e40 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -566,7 +566,7 @@ STORE(__bch_cache_set)
 	}

 	if (attr == &sysfs_trigger_gc)
-		bch_queue_gc(c);
+		wake_up_gc(c);

 	if (attr == &sysfs_prune_cache) {
 		struct shrink_control sc;