author     Kent Overstreet <koverstreet@google.com>    2013-04-24 22:01:12 -0400
committer  Kent Overstreet <koverstreet@google.com>    2013-06-26 20:09:13 -0400
commit     119ba0f82839cd80eaef3e6991988f1403965d5b
tree       35422c7c210537530083632ecb2d1119298d307a /drivers/md
parent     a9dd53adbb84c12f769a862ba2c80404873c2c99
bcache: Convert allocator thread to kthread
Using a workqueue when we just want a single thread is a bit silly.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
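
For readers less familiar with the pattern being adopted: a dedicated kthread
is created asleep, then kicked with wake_up_process() whenever there is work
for it. Below is a minimal sketch of that lifecycle with hypothetical names
(my_dev, my_thread_fn); it is not code from this patch, which additionally
ties thread exit to its own CACHE_SET_STOPPING_2 flag instead of
kthread_stop()/kthread_should_stop():

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	struct my_dev {
		struct task_struct	*thread;
	};

	static int my_thread_fn(void *arg)
	{
		struct my_dev *d = arg;

		while (!kthread_should_stop()) {
			/* do any pending work for d, then sleep until woken */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
		return 0;
	}

	static int my_dev_start(struct my_dev *d)
	{
		/* kthread_create() leaves the thread sleeping;
		 * kthread_run() would start it immediately */
		d->thread = kthread_create(my_thread_fn, d, "my_thread");
		if (IS_ERR(d->thread))
			return PTR_ERR(d->thread);
		wake_up_process(d->thread);
		return 0;
	}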
Diffstat (limited to 'drivers/md')

 drivers/md/bcache/alloc.c  | 34
 drivers/md/bcache/bcache.h | 17
 drivers/md/bcache/btree.c  |  6
 drivers/md/bcache/super.c  | 19

 4 files changed, 43 insertions(+), 33 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 048f2947e08b..38428f46ea74 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,6 +63,7 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/kthread.h>
 #include <linux/random.h>
 
 #define MAX_IN_FLIGHT_DISCARDS		8U
@@ -151,7 +152,7 @@ static void discard_finish(struct work_struct *w)
 	mutex_unlock(&ca->set->bucket_lock);
 
 	closure_wake_up(&ca->set->bucket_wait);
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	closure_put(&ca->set->cl);
 }
@@ -358,30 +359,26 @@ static void invalidate_buckets(struct cache *ca)
 
 #define allocator_wait(ca, cond)					\
 do {									\
-	DEFINE_WAIT(__wait);						\
-									\
 	while (1) {							\
-		prepare_to_wait(&ca->set->alloc_wait,			\
-				&__wait, TASK_INTERRUPTIBLE);		\
+		set_current_state(TASK_INTERRUPTIBLE);			\
 		if (cond)						\
 			break;						\
 									\
 		mutex_unlock(&(ca)->set->bucket_lock);			\
 		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
-			finish_wait(&ca->set->alloc_wait, &__wait);	\
-			closure_return(cl);				\
+			closure_put(&ca->set->cl);			\
+			return 0;					\
 		}							\
 									\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
 	}								\
-									\
-	finish_wait(&ca->set->alloc_wait, &__wait);			\
+	__set_current_state(TASK_RUNNING);				\
 } while (0)
 
-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
 {
-	struct cache *ca = container_of(cl, struct cache, alloc);
+	struct cache *ca = arg;
 
 	mutex_lock(&ca->set->bucket_lock);
 
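
A note on the rewritten allocator_wait() above: with the waitqueue gone, it
open-codes the classic single-sleeper wait loop. There is no lost-wakeup race
because the task state is set to TASK_INTERRUPTIBLE before the condition is
tested; if a waker makes the condition true and calls wake_up_process() in
between, the task is put back to TASK_RUNNING and schedule() returns without
blocking. The generic shape of the two sides (a sketch of the idiom, not code
from the patch):

	/* sleeper (a single, known task): */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (cond)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* waker: */
	cond = true;		/* make the condition visible first */
	wake_up_process(task);	/* harmless if the task is already running */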
@@ -442,7 +439,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
 {
 	long r = -1;
 again:
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
 	    fifo_pop(&ca->free, r)) {
@@ -552,6 +549,19 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
 /* Init */
 
+int bch_cache_allocator_start(struct cache *ca)
+{
+	ca->alloc_thread = kthread_create(bch_allocator_thread,
+					  ca, "bcache_allocator");
+	if (IS_ERR(ca->alloc_thread))
+		return PTR_ERR(ca->alloc_thread);
+
+	closure_get(&ca->set->cl);
+	wake_up_process(ca->alloc_thread);
+
+	return 0;
+}
+
 void bch_cache_allocator_exit(struct cache *ca)
 {
 	struct discard *d;
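
One ordering detail in bch_cache_allocator_start() above: using
kthread_create() rather than kthread_run() means the thread exists but cannot
run yet, so the closure_get() on the cache set happens before the thread could
possibly reach the closure_put() in its exit path. Condensed (the same calls
as the function above, annotated):

	ca->alloc_thread = kthread_create(bch_allocator_thread,
					  ca, "bcache_allocator");
	if (IS_ERR(ca->alloc_thread))
		return PTR_ERR(ca->alloc_thread); /* no thread to unwind */

	closure_get(&ca->set->cl);	/* ref the thread drops on exit */
	wake_up_process(ca->alloc_thread); /* only now may the thread run */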
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b42a4ab..166c8ddc0be4 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -565,8 +565,7 @@ struct cache {
 
 	unsigned		watermark[WATERMARK_MAX];
 
-	struct closure		alloc;
-	struct workqueue_struct	*alloc_workqueue;
+	struct task_struct	*alloc_thread;
 
 	struct closure		prio;
 	struct prio_set		*disk_buckets;
@@ -703,9 +702,6 @@ struct cache_set {
 	/* For the btree cache */
 	struct shrinker		shrink;
 
-	/* For the allocator itself */
-	wait_queue_head_t	alloc_wait;
-
 	/* For the btree cache and anything allocation related */
 	struct mutex		bucket_lock;
 
@@ -1173,6 +1169,15 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
 static struct kobj_attribute ksysfs_##n =				\
 	__ATTR(n, S_IWUSR|S_IRUSR, show, store)
 
+static inline void wake_up_allocators(struct cache_set *c)
+{
+	struct cache *ca;
+	unsigned i;
+
+	for_each_cache(ca, c, i)
+		wake_up_process(ca->alloc_thread);
+}
+
 /* Forward declarations */
 
 void bch_writeback_queue(struct cached_dev *);
@@ -1193,7 +1198,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
-void bch_allocator_thread(struct closure *);
 
 long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
 void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1244,6 +1248,7 @@ int bch_btree_cache_alloc(struct cache_set *);
 void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
+int bch_cache_allocator_start(struct cache *ca);
 void bch_cache_allocator_exit(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
 
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 7a5658f04e62..45b88fbffbe0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -273,7 +273,7 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
 {
 	if (w->prio_blocked &&
 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);
 
 	if (w->journal) {
 		atomic_dec_bug(w->journal);
@@ -984,7 +984,7 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
 
 	if (b->prio_blocked &&
 	    !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);
 
 	b->prio_blocked = 0;
 
@@ -1547,7 +1547,7 @@ static void bch_btree_gc(struct closure *cl)
 	blktrace_msg_all(c, "Finished gc");
 
 	trace_bcache_gc_end(c->sb.set_uuid);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);
 
 	continue_at(cl, bch_moving_gc, bch_gc_wq);
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3de5626919ef..aaeda235fc75 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1282,7 +1282,7 @@ static void cache_set_flush(struct closure *cl)
 
 	/* Shut down allocator threads */
 	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);
 
 	bch_cache_accounting_destroy(&c->accounting);
 
@@ -1373,7 +1373,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	c->btree_pages = max_t(int, c->btree_pages / 4,
 			       BTREE_MAX_PAGES);
 
-	init_waitqueue_head(&c->alloc_wait);
 	mutex_init(&c->bucket_lock);
 	mutex_init(&c->fill_lock);
 	mutex_init(&c->sort_lock);
@@ -1496,9 +1495,10 @@ static void run_cache_set(struct cache_set *c)
 	 */
 	bch_journal_next(&c->journal);
 
+	err = "error starting allocator thread";
 	for_each_cache(ca, c, i)
-		closure_call(&ca->alloc, bch_allocator_thread,
-			     system_wq, &c->cl);
+		if (bch_cache_allocator_start(ca))
+			goto err;
 
 	/*
 	 * First place it's safe to allocate: btree_check() and
@@ -1531,17 +1531,16 @@ static void run_cache_set(struct cache_set *c)
 
 	bch_btree_gc_finish(c);
 
+	err = "error starting allocator thread";
 	for_each_cache(ca, c, i)
-		closure_call(&ca->alloc, bch_allocator_thread,
-			     ca->alloc_workqueue, &c->cl);
+		if (bch_cache_allocator_start(ca))
+			goto err;
 
 	mutex_lock(&c->bucket_lock);
 	for_each_cache(ca, c, i)
 		bch_prio_write(ca);
 	mutex_unlock(&c->bucket_lock);
 
-	wake_up(&c->alloc_wait);
-
 	err = "cannot allocate new UUID bucket";
 	if (__uuid_write(c))
 		goto err_unlock_gc;
@@ -1673,9 +1672,6 @@ void bch_cache_release(struct kobject *kobj)
 
 	bio_split_pool_free(&ca->bio_split_hook);
 
-	if (ca->alloc_workqueue)
-		destroy_workqueue(ca->alloc_workqueue);
-
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
 	kfree(ca->prio_buckets);
 	vfree(ca->buckets);
@@ -1723,7 +1719,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
 					 2, GFP_KERNEL)) ||
 	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
 	    bio_split_pool_init(&ca->bio_split_hook))
 		return -ENOMEM;
 
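
Taken together, shutdown is now a flag-based handshake rather than a
kthread_stop() call: cache_set_flush() sets CACHE_SET_STOPPING_2 and calls
wake_up_allocators(); each allocator thread observes the bit inside
allocator_wait(), drops the closure reference taken in
bch_cache_allocator_start(), and returns. Condensed from the hunks above:

	/* stopper, in cache_set_flush(): */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
	wake_up_allocators(c);

	/* each allocator thread, inside allocator_wait(): */
	if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {
		closure_put(&ca->set->cl); /* ref from bch_cache_allocator_start() */
		return 0;		   /* thread function exits */
	}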