about summary refs log tree commit diff stats
path: root/drivers/md/bcache
diff options
context:
space:
mode:
authorKent Overstreet <kmo@daterainc.com>2013-07-10 21:31:58 -0400
committerKent Overstreet <kmo@daterainc.com>2013-07-12 03:22:49 -0400
commit79826c35eb99cd3c0873b8396f45fa26c87fb0b0 (patch)
tree0812a5cfdb0b15321af8c3f57eb4d8790e928d9e /drivers/md/bcache
parent29ebf465b9050f241c4433a796a32e6c896a9dcd (diff)
bcache: Allocation kthread fixes
The alloc kthread should've been using try_to_freeze() - and also there was the potential for the alloc kthread to get woken up after it had shut down, which would have been bad.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--drivers/md/bcache/alloc.c18
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/super.c11
3 files changed, 15 insertions, 18 deletions
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index b54b73b9b2b7..e45f5575fd4d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,6 +63,7 @@
63#include "bcache.h" 63#include "bcache.h"
64#include "btree.h" 64#include "btree.h"
65 65
66#include <linux/freezer.h>
66#include <linux/kthread.h> 67#include <linux/kthread.h>
67#include <linux/random.h> 68#include <linux/random.h>
68#include <trace/events/bcache.h> 69#include <trace/events/bcache.h>
@@ -363,11 +364,10 @@ do { \
363 break; \ 364 break; \
364 \ 365 \
365 mutex_unlock(&(ca)->set->bucket_lock); \ 366 mutex_unlock(&(ca)->set->bucket_lock); \
366 if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \ 367 if (kthread_should_stop()) \
367 closure_put(&ca->set->cl); \
368 return 0; \ 368 return 0; \
369 } \
370 \ 369 \
370 try_to_freeze(); \
371 schedule(); \ 371 schedule(); \
372 mutex_lock(&(ca)->set->bucket_lock); \ 372 mutex_lock(&(ca)->set->bucket_lock); \
373 } \ 373 } \
@@ -547,14 +547,12 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
547 547
548int bch_cache_allocator_start(struct cache *ca) 548int bch_cache_allocator_start(struct cache *ca)
549{ 549{
550 ca->alloc_thread = kthread_create(bch_allocator_thread, 550 struct task_struct *k = kthread_run(bch_allocator_thread,
551 ca, "bcache_allocator"); 551 ca, "bcache_allocator");
552 if (IS_ERR(ca->alloc_thread)) 552 if (IS_ERR(k))
553 return PTR_ERR(ca->alloc_thread); 553 return PTR_ERR(k);
554
555 closure_get(&ca->set->cl);
556 wake_up_process(ca->alloc_thread);
557 554
555 ca->alloc_thread = k;
558 return 0; 556 return 0;
559} 557}
560 558
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 68f1ded81ae0..b39f6f0b45f2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -664,13 +664,9 @@ struct gc_stat {
664 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; 664 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
665 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e. 665 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
666 * flushing dirty data). 666 * flushing dirty data).
667 *
668 * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
669 * the allocation thread.
670 */ 667 */
671#define CACHE_SET_UNREGISTERING 0 668#define CACHE_SET_UNREGISTERING 0
672#define CACHE_SET_STOPPING 1 669#define CACHE_SET_STOPPING 1
673#define CACHE_SET_STOPPING_2 2
674 670
675struct cache_set { 671struct cache_set {
676 struct closure cl; 672 struct closure cl;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f6a62174e8f6..547c4c57b052 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
16#include <linux/buffer_head.h> 16#include <linux/buffer_head.h>
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <linux/genhd.h> 18#include <linux/genhd.h>
19#include <linux/kthread.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/random.h> 21#include <linux/random.h>
21#include <linux/reboot.h> 22#include <linux/reboot.h>
@@ -1329,11 +1330,9 @@ static void cache_set_free(struct closure *cl)
1329static void cache_set_flush(struct closure *cl) 1330static void cache_set_flush(struct closure *cl)
1330{ 1331{
1331 struct cache_set *c = container_of(cl, struct cache_set, caching); 1332 struct cache_set *c = container_of(cl, struct cache_set, caching);
1333 struct cache *ca;
1332 struct btree *b; 1334 struct btree *b;
1333 1335 unsigned i;
1334 /* Shut down allocator threads */
1335 set_bit(CACHE_SET_STOPPING_2, &c->flags);
1336 wake_up_allocators(c);
1337 1336
1338 bch_cache_accounting_destroy(&c->accounting); 1337 bch_cache_accounting_destroy(&c->accounting);
1339 1338
@@ -1348,6 +1347,10 @@ static void cache_set_flush(struct closure *cl)
1348 if (btree_node_dirty(b)) 1347 if (btree_node_dirty(b))
1349 bch_btree_node_write(b, NULL); 1348 bch_btree_node_write(b, NULL);
1350 1349
1350 for_each_cache(ca, c, i)
1351 if (ca->alloc_thread)
1352 kthread_stop(ca->alloc_thread);
1353
1351 closure_return(cl); 1354 closure_return(cl);
1352} 1355}
1353 1356