author    Jens Axboe <axboe@kernel.dk>    2013-07-16 23:10:18 -0400
committer Jens Axboe <axboe@kernel.dk>    2013-07-16 23:10:18 -0400
commit    0878ae2db83a10894724cdeaba7ef9f1ac1c9ac8
tree      a0c6d8f2505eb8f499022acbdd856baefe9e5f3d
parent    d0e3d0238d83b05d7846c7281524e0f814633dbd
parent    79826c35eb99cd3c0873b8396f45fa26c87fb0b0
Merge branch 'bcache-for-3.11' of git://evilpiepirate.org/~kent/linux-bcache into for-3.11/drivers
Kent writes:

  Hey Jens - I've been busy torture testing and chasing bugs; here are the
  fruits of my labors. These are all fairly small fixes, some of them quite
  important.
 drivers/md/bcache/alloc.c   | 18
 drivers/md/bcache/bcache.h  |  5
 drivers/md/bcache/btree.c   |  4
 drivers/md/bcache/closure.c |  6
 drivers/md/bcache/journal.c |  7
 drivers/md/bcache/request.c |  8
 drivers/md/bcache/super.c   | 42
 drivers/md/bcache/sysfs.c   |  2
 8 files changed, 61 insertions(+), 31 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index b54b73b9b2b7..e45f5575fd4d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,6 +63,7 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <trace/events/bcache.h>
@@ -363,11 +364,10 @@ do { \
 			break;						\
 									\
 		mutex_unlock(&(ca)->set->bucket_lock);			\
-		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
-			closure_put(&ca->set->cl);			\
+		if (kthread_should_stop())				\
 			return 0;					\
-		}							\
 									\
+		try_to_freeze();					\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
 	}								\
@@ -547,14 +547,12 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
 int bch_cache_allocator_start(struct cache *ca)
 {
-	ca->alloc_thread = kthread_create(bch_allocator_thread,
-					  ca, "bcache_allocator");
-	if (IS_ERR(ca->alloc_thread))
-		return PTR_ERR(ca->alloc_thread);
-
-	closure_get(&ca->set->cl);
-	wake_up_process(ca->alloc_thread);
+	struct task_struct *k = kthread_run(bch_allocator_thread,
+					    ca, "bcache_allocator");
+	if (IS_ERR(k))
+		return PTR_ERR(k);
 
+	ca->alloc_thread = k;
 	return 0;
 }
 
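Note: the allocator thread now follows the stock kthread lifecycle instead of the CACHE_SET_STOPPING_2 flag plus a closure reference: kthread_run() starts it, the wait loop polls kthread_should_stop() and calls try_to_freeze(), and cache_set_flush() (see the super.c hunk below) tears it down with kthread_stop(). A minimal standalone sketch of that pattern, for orientation only; the names here (struct my_dev, my_thread_fn, my_start, my_stop) are hypothetical and not bcache's:

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_dev {
	struct task_struct	*thread;
};

static int my_thread_fn(void *data)
{
	/* data is the struct my_dev * passed to kthread_run(); real code would do work here */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		try_to_freeze();	/* cooperate with suspend/hibernate */
		schedule();		/* sleep until woken or stopped */
	}

	__set_current_state(TASK_RUNNING);
	return 0;
}

static int my_start(struct my_dev *d)
{
	struct task_struct *k = kthread_run(my_thread_fn, d, "my_thread");

	if (IS_ERR(k))
		return PTR_ERR(k);

	d->thread = k;
	return 0;
}

static void my_stop(struct my_dev *d)
{
	if (d->thread)
		kthread_stop(d->thread);	/* wakes the thread and waits for it to exit */
}

kthread_stop() both wakes the sleeping thread and waits for it to return, which is what lets the patch drop the extra closure reference the old shutdown path needed.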
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 342ba86c6e4f..b39f6f0b45f2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -434,6 +434,7 @@ struct bcache_device {
 
 	/* If nonzero, we're detaching/unregistering from cache set */
 	atomic_t		detaching;
+	int			flush_done;
 
 	uint64_t		nr_stripes;
 	unsigned		stripe_size_bits;
@@ -663,13 +664,9 @@ struct gc_stat {
  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
  * flushing dirty data).
- *
- * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
- * the allocation thread.
  */
 #define CACHE_SET_UNREGISTERING		0
 #define CACHE_SET_STOPPING		1
-#define CACHE_SET_STOPPING_2		2
 
 struct cache_set {
 	struct closure		cl;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 15b58239c683..ee372884c405 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1410,8 +1410,10 @@ static void btree_gc_start(struct cache_set *c)
 	for_each_cache(ca, c, i)
 		for_each_bucket(b, ca) {
 			b->gc_gen = b->gen;
-			if (!atomic_read(&b->pin))
+			if (!atomic_read(&b->pin)) {
 				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+				SET_GC_SECTORS_USED(b, 0);
+			}
 		}
 
 	mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index bd05a9a8c7cf..9aba2017f0d1 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 	} else {
 		struct closure *parent = cl->parent;
 		struct closure_waitlist *wait = closure_waitlist(cl);
+		closure_fn *destructor = cl->fn;
 
 		closure_debug_destroy(cl);
 
+		smp_mb();
 		atomic_set(&cl->remaining, -1);
 
 		if (wait)
 			closure_wake_up(wait);
 
-		if (cl->fn)
-			cl->fn(cl);
+		if (destructor)
+			destructor(cl);
 
 		if (parent)
 			closure_put(parent);
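Note: this is a publish-after-snapshot ordering fix. cl->fn is copied into a local before remaining is set to -1, and smp_mb() orders that read against the store, because once a waiter woken by closure_wake_up() observes remaining == -1 it may free or recycle the closure. A generic sketch of the idea; the type and names (struct obj, obj_release) are made up for illustration and only mirror the shape of struct closure:

#include <linux/atomic.h>

struct obj {
	atomic_t	remaining;
	void		(*destructor)(struct obj *);
};

static void obj_release(struct obj *o)
{
	/* snapshot the callback before publishing the final refcount */
	void (*destructor)(struct obj *) = o->destructor;

	/*
	 * The barrier keeps the read above from sinking below the store. In
	 * the real closure code, a waiter that sees remaining == -1 may
	 * recycle the closure, so cl->fn must not be dereferenced after that
	 * point; the snapshotted pointer is used instead.
	 */
	smp_mb();
	atomic_set(&o->remaining, -1);

	if (destructor)
		destructor(o);
}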
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 4b250667bb7f..ba95ab84b2be 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -184,9 +184,14 @@ bsearch:
 	pr_debug("starting binary search, l %u r %u", l, r);
 
 	while (l + 1 < r) {
+		seq = list_entry(list->prev, struct journal_replay,
+				 list)->j.seq;
+
 		m = (l + r) >> 1;
+		read_bucket(m);
 
-		if (read_bucket(m))
+		if (seq != list_entry(list->prev, struct journal_replay,
+				      list)->j.seq)
 			l = m;
 		else
 			r = m;
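Note: the search loop no longer keys off read_bucket()'s return value; it records the newest sequence number already on the replay list, probes bucket m, and treats the probe as a hit only if that tail sequence number changed. Structurally it is the usual predicate-boundary binary search; a generic sketch under that reading, where probe_adds_entries() is a hypothetical stand-in for "read_bucket(m) appended something to the list":

#include <linux/types.h>

/* Converge on the boundary where probes stop producing new entries. */
static unsigned find_probe_boundary(unsigned l, unsigned r,
				    bool (*probe_adds_entries)(unsigned))
{
	while (l + 1 < r) {
		unsigned m = (l + r) >> 1;

		if (probe_adds_entries(m))
			l = m;	/* m still yields entries: move the lower bound up */
		else
			r = m;	/* nothing new at m: move the upper bound down */
	}

	return l;
}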
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index b6e74d3c8faf..786a1a4f74d8 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -488,6 +488,12 @@ static void bch_insert_data_loop(struct closure *cl)
 		bch_queue_gc(op->c);
 	}
 
+	/*
+	 * Journal writes are marked REQ_FLUSH; if the original write was a
+	 * flush, it'll wait on the journal write.
+	 */
+	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+
 	do {
 		unsigned i;
 		struct bkey *k;
@@ -710,7 +716,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->task			= current;
 	s->orig_bio		= bio;
 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & REQ_FLUSH) != 0;
+	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
 	s->recoverable		= 1;
 	s->start_time		= jiffies;
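Note: these two hunks are the two halves of the flush handling. search_alloc() latches flush_journal when the incoming bio carries REQ_FLUSH or REQ_FUA, and bch_insert_data_loop() strips those bits from the data writes it issues, since the ordering guarantee is provided by the journal write (which is itself marked REQ_FLUSH). A rough sketch of that split, using a hypothetical helper name rather than bcache's actual code paths:

#include <linux/bio.h>
#include <linux/blk_types.h>

/*
 * split_ordering_flags - hypothetical helper. Decide whether the journal
 * write must flush, then clear the ordering flags from the data bio so the
 * backing-device writes go out as plain writes.
 */
static bool split_ordering_flags(struct bio *bio)
{
	bool flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;

	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	return flush_journal;
}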
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index cff2d182dfb0..547c4c57b052 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
 #include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
@@ -706,7 +707,8 @@ static void bcache_device_detach(struct bcache_device *d)
 		atomic_set(&d->detaching, 0);
 	}
 
-	bcache_device_unlink(d);
+	if (!d->flush_done)
+		bcache_device_unlink(d);
 
 	d->c->devices[d->id] = NULL;
 	closure_put(&d->c->caching);
@@ -806,6 +808,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
 
+	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
 	return 0;
 }
 
@@ -1053,6 +1057,14 @@ static void cached_dev_flush(struct closure *cl)
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 	struct bcache_device *d = &dc->disk;
 
+	mutex_lock(&bch_register_lock);
+	d->flush_done = 1;
+
+	if (d->c)
+		bcache_device_unlink(d);
+
+	mutex_unlock(&bch_register_lock);
+
 	bch_cache_accounting_destroy(&dc->accounting);
 	kobject_del(&d->kobj);
 
@@ -1318,11 +1330,9 @@ static void cache_set_free(struct closure *cl)
 static void cache_set_flush(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	struct cache *ca;
 	struct btree *b;
-
-	/* Shut down allocator threads */
-	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up_allocators(c);
+	unsigned i;
 
 	bch_cache_accounting_destroy(&c->accounting);
 
@@ -1337,24 +1347,32 @@ static void cache_set_flush(struct closure *cl)
 		if (btree_node_dirty(b))
 			bch_btree_node_write(b, NULL);
 
+	for_each_cache(ca, c, i)
+		if (ca->alloc_thread)
+			kthread_stop(ca->alloc_thread);
+
 	closure_return(cl);
 }
 
 static void __cache_set_unregister(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
-	struct cached_dev *dc, *t;
+	struct cached_dev *dc;
 	size_t i;
 
 	mutex_lock(&bch_register_lock);
 
-	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
-		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
-			bch_cached_dev_detach(dc);
-
 	for (i = 0; i < c->nr_uuids; i++)
-		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
-			bcache_device_stop(c->devices[i]);
+		if (c->devices[i]) {
+			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+				dc = container_of(c->devices[i],
+						  struct cached_dev, disk);
+				bch_cached_dev_detach(dc);
+			} else {
+				bcache_device_stop(c->devices[i]);
+			}
+		}
 
 	mutex_unlock(&bch_register_lock);
 
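Note: one line in the super.c changes that is easy to miss is the blk_queue_flush(q, REQ_FLUSH|REQ_FUA) call added to bcache_device_init(). On 3.x-era kernels the block layer filters REQ_FLUSH/REQ_FUA out of bios sent to a queue that has not advertised flush support, so without this registration the REQ_FLUSH/REQ_FUA handling added in request.c would never see the flags. A minimal sketch of the registration step; my_init_queue is a hypothetical name, bcache does this inside bcache_device_init():

#include <linux/blkdev.h>

static void my_init_queue(struct request_queue *q)
{
	/* advertise that this driver handles cache flush and FUA itself */
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}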
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index dd3f00a42729..12a2c2846f99 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -232,6 +232,8 @@ STORE(__cached_dev)
 			bch_uuid_write(dc->disk.c);
 		}
 		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+		if (!env)
+			return -ENOMEM;
 		add_uevent_var(env, "DRIVER=bcache");
 		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
 		add_uevent_var(env, "CACHED_LABEL=%s", buf);