author    Kent Overstreet <koverstreet@google.com>    2013-04-24 22:01:12 -0400
committer Kent Overstreet <koverstreet@google.com>    2013-06-26 20:09:13 -0400
commit    119ba0f82839cd80eaef3e6991988f1403965d5b (patch)
tree      35422c7c210537530083632ecb2d1119298d307a /drivers/md/bcache/alloc.c
parent    a9dd53adbb84c12f769a862ba2c80404873c2c99 (diff)
bcache: Convert allocator thread to kthread
Using a workqueue when we just want a single thread is a bit silly.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
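This is the stock kernel kthread idiom: kthread_create() allocates a thread that starts out asleep, wake_up_process() makes it runnable, and the thread itself blocks by setting TASK_INTERRUPTIBLE and calling schedule() until someone wakes it. A minimal sketch of that idiom as a toy module follows; the module, demo_thread_fn() and the have_work flag are illustrative stand-ins, not part of this patch, and the sketch shuts down via kthread_stop() where the allocator instead watches CACHE_SET_STOPPING_2.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_thread;
static bool have_work;          /* illustrative; real code would synchronize */

static int demo_thread_fn(void *arg)
{
        while (!kthread_should_stop()) {
                /* Announce the intent to sleep *before* testing the
                 * condition, just as allocator_wait() does. */
                set_current_state(TASK_INTERRUPTIBLE);

                if (!have_work && !kthread_should_stop())
                        schedule();             /* sleep until woken */

                __set_current_state(TASK_RUNNING);

                if (have_work) {
                        have_work = false;
                        pr_info("demo: handled one unit of work\n");
                }
        }
        return 0;
}

static int __init demo_init(void)
{
        demo_thread = kthread_create(demo_thread_fn, NULL, "demo_thread");
        if (IS_ERR(demo_thread))
                return PTR_ERR(demo_thread);

        wake_up_process(demo_thread);   /* kthread_create() leaves it asleep */
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_thread);      /* sets the stop flag, then wakes it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The ordering is what lets the patch drop the prepare_to_wait()/finish_wait() waitqueue dance: a waker sets the condition and then calls wake_up_process(). If the thread has already entered TASK_INTERRUPTIBLE, the wakeup puts it back to TASK_RUNNING and schedule() returns at once; if it has not yet tested the condition, it sees it set and never sleeps. Either way the wakeup cannot be lost.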
Diffstat (limited to 'drivers/md/bcache/alloc.c')
-rw-r--r--  drivers/md/bcache/alloc.c | 34
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 048f2947e08b..38428f46ea74 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,6 +63,7 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/kthread.h>
 #include <linux/random.h>
 
 #define MAX_IN_FLIGHT_DISCARDS        8U
@@ -151,7 +152,7 @@ static void discard_finish(struct work_struct *w)
         mutex_unlock(&ca->set->bucket_lock);
 
         closure_wake_up(&ca->set->bucket_wait);
-        wake_up(&ca->set->alloc_wait);
+        wake_up_process(ca->alloc_thread);
 
         closure_put(&ca->set->cl);
 }
@@ -358,30 +359,26 @@ static void invalidate_buckets(struct cache *ca)
 
 #define allocator_wait(ca, cond)                                       \
 do {                                                                   \
-        DEFINE_WAIT(__wait);                                           \
-                                                                       \
         while (1) {                                                    \
-                prepare_to_wait(&ca->set->alloc_wait,                  \
-                                &__wait, TASK_INTERRUPTIBLE);          \
+                set_current_state(TASK_INTERRUPTIBLE);                 \
                 if (cond)                                              \
                         break;                                         \
                                                                        \
                 mutex_unlock(&(ca)->set->bucket_lock);                 \
                 if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \
-                        finish_wait(&ca->set->alloc_wait, &__wait);    \
-                        closure_return(cl);                            \
+                        closure_put(&ca->set->cl);                     \
+                        return 0;                                      \
                 }                                                      \
                                                                        \
                 schedule();                                            \
                 mutex_lock(&(ca)->set->bucket_lock);                   \
         }                                                              \
-                                                                       \
-        finish_wait(&ca->set->alloc_wait, &__wait);                    \
+        __set_current_state(TASK_RUNNING);                             \
 } while (0)
 
-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
 {
-        struct cache *ca = container_of(cl, struct cache, alloc);
+        struct cache *ca = arg;
 
         mutex_lock(&ca->set->bucket_lock);
 
@@ -442,7 +439,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
 {
         long r = -1;
 again:
-        wake_up(&ca->set->alloc_wait);
+        wake_up_process(ca->alloc_thread);
 
         if (fifo_used(&ca->free) > ca->watermark[watermark] &&
             fifo_pop(&ca->free, r)) {
@@ -552,6 +549,19 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
 /* Init */
 
+int bch_cache_allocator_start(struct cache *ca)
+{
+        ca->alloc_thread = kthread_create(bch_allocator_thread,
+                                          ca, "bcache_allocator");
+        if (IS_ERR(ca->alloc_thread))
+                return PTR_ERR(ca->alloc_thread);
+
+        closure_get(&ca->set->cl);
+        wake_up_process(ca->alloc_thread);
+
+        return 0;
+}
+
 void bch_cache_allocator_exit(struct cache *ca)
 {
         struct discard *d;