Diffstat (limited to 'drivers/md/bcache/alloc.c')
-rw-r--r--  drivers/md/bcache/alloc.c  46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 048f2947e08b..e45f5575fd4d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,7 +63,10 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/random.h>
+#include <trace/events/bcache.h>
 
 #define MAX_IN_FLIGHT_DISCARDS	8U
 
@@ -151,7 +154,7 @@ static void discard_finish(struct work_struct *w)
 	mutex_unlock(&ca->set->bucket_lock);
 
 	closure_wake_up(&ca->set->bucket_wait);
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	closure_put(&ca->set->cl);
 }
@@ -350,38 +353,30 @@ static void invalidate_buckets(struct cache *ca)
 		break;
 	}
 
-	pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
-		 fifo_used(&ca->free), ca->free.size,
-		 fifo_used(&ca->free_inc), ca->free_inc.size,
-		 fifo_used(&ca->unused), ca->unused.size);
+	trace_bcache_alloc_invalidate(ca);
 }
 
 #define allocator_wait(ca, cond)					\
 do {									\
-	DEFINE_WAIT(__wait);						\
-									\
 	while (1) {							\
-		prepare_to_wait(&ca->set->alloc_wait,			\
-				&__wait, TASK_INTERRUPTIBLE);		\
+		set_current_state(TASK_INTERRUPTIBLE);			\
 		if (cond)						\
 			break;						\
 									\
 		mutex_unlock(&(ca)->set->bucket_lock);			\
-		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
-			finish_wait(&ca->set->alloc_wait, &__wait);	\
-			closure_return(cl);				\
-		}							\
+		if (kthread_should_stop())				\
+			return 0;					\
 									\
+		try_to_freeze();					\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
 	}								\
-									\
-	finish_wait(&ca->set->alloc_wait, &__wait);			\
+	__set_current_state(TASK_RUNNING);				\
 } while (0)
 
-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
 {
-	struct cache *ca = container_of(cl, struct cache, alloc);
+	struct cache *ca = arg;
 
 	mutex_lock(&ca->set->bucket_lock);
 
@@ -442,7 +437,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
 {
 	long r = -1;
 again:
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
 	    fifo_pop(&ca->free, r)) {
@@ -476,9 +471,7 @@ again:
 		return r;
 	}
 
-	pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
-		 atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
-		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+	trace_bcache_alloc_fail(ca);
 
 	if (cl) {
 		closure_wait(&ca->set->bucket_wait, cl);
@@ -552,6 +545,17 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
 /* Init */
 
+int bch_cache_allocator_start(struct cache *ca)
+{
+	struct task_struct *k = kthread_run(bch_allocator_thread,
+					    ca, "bcache_allocator");
+	if (IS_ERR(k))
+		return PTR_ERR(k);
+
+	ca->alloc_thread = k;
+	return 0;
+}
+
 void bch_cache_allocator_exit(struct cache *ca)
 {
 	struct discard *d;
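
For readers following the conversion: the patch replaces the closure-based allocator thread (woken through a waitqueue on ca->set->alloc_wait and stopped via the CACHE_SET_STOPPING_2 flag) with a standard kernel thread that is woken with wake_up_process() and stopped with kthread_stop(). Below is a minimal, self-contained sketch of that kthread pattern, under the same ordering assumptions allocator_wait() relies on; the module and identifiers (demo_worker, worker_fn) are illustrative, not code from this patch.

/*
 * Minimal sketch (not from this patch) of the kthread pattern the
 * allocator is converted to: a worker that sleeps until prodded with
 * wake_up_process() and exits cleanly on kthread_stop().
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *worker;	/* plays the role of ca->alloc_thread */

static int worker_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/*
		 * Mark ourselves sleeping *before* the final stop check, so
		 * a wake_up_process() racing with schedule() is not lost --
		 * the same ordering allocator_wait() depends on.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();	/* sleep until wake_up_process(worker) */

		/* ...woken: do one unit of work, e.g. refill a free list... */
	}
	return 0;
}

static int __init demo_init(void)
{
	/* Same create-and-check shape as bch_cache_allocator_start(). */
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);	/* wakes the thread and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The key detail, visible in both the sketch and the new allocator_wait(), is setting TASK_INTERRUPTIBLE before the condition check: kthread_stop() and wake_up_process() set the task back to runnable, so a wakeup arriving between the check and schedule() makes schedule() return immediately instead of being missed.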