-rw-r--r--   drivers/md/bcache/bcache.h  |  4
-rw-r--r--   drivers/md/bcache/btree.c   | 39
-rw-r--r--   drivers/md/bcache/btree.h   |  3
-rw-r--r--   drivers/md/bcache/request.c |  4
-rw-r--r--   drivers/md/bcache/super.c   |  2
5 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6b420a55c745..c3ea03c9a1a8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@ struct cache {
 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 	 * cpu
 	 */
-	unsigned		invalidate_needs_gc:1;
+	unsigned		invalidate_needs_gc;
 
 	bool			discard; /* Get rid of? */
 
@@ -593,8 +593,8 @@ struct cache_set {
 
 	/* Counts how many sectors bio_insert has added to the cache */
 	atomic_t		sectors_to_gc;
+	wait_queue_head_t	gc_wait;
 
-	wait_queue_head_t	moving_gc_wait;
 	struct keybuf		moving_gc_keys;
 	/* Number of moving GC bios in flight */
 	struct semaphore	moving_in_flight;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db40cd7b..2efdce07247c 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
 	bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-	struct cache_set *c = arg;
 	struct cache *ca;
 	unsigned i;
 
-	while (1) {
-again:
-		bch_btree_gc(c);
+	for_each_cache(ca, c, i)
+		if (ca->invalidate_needs_gc)
+			return true;
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop())
-			break;
+	if (atomic_read(&c->sectors_to_gc) < 0)
+		return true;
 
-		mutex_lock(&c->bucket_lock);
+	return false;
+}
 
-		for_each_cache(ca, c, i)
-			if (ca->invalidate_needs_gc) {
-				mutex_unlock(&c->bucket_lock);
-				set_current_state(TASK_RUNNING);
-				goto again;
-			}
+static int bch_gc_thread(void *arg)
+{
+	struct cache_set *c = arg;
 
-		mutex_unlock(&c->bucket_lock);
+	while (1) {
+		wait_event_interruptible(c->gc_wait,
+			   kthread_should_stop() || gc_should_run(c));
 
-		schedule();
+		if (kthread_should_stop())
+			break;
+
+		set_gc_sectors(c);
+		bch_btree_gc(c);
 	}
 
 	return 0;
@@ -1790,11 +1792,10 @@ again:
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
 	if (IS_ERR(c->gc_thread))
 		return PTR_ERR(c->gc_thread);
 
-	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
 	return 0;
 }
 
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 5c391fa01bed..9b80417cd547 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-	if (c->gc_thread)
-		wake_up_process(c->gc_thread);
+	wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE	0
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e424b3..a37c1776f2e3 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 		wake_up_gc(op->c);
-	}
 
 	if (op->bypass)
 		return bch_data_invalidate(cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad441cd76..66669c8f4161 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1491,6 +1491,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	mutex_init(&c->bucket_lock);
 	init_waitqueue_head(&c->btree_cache_wait);
 	init_waitqueue_head(&c->bucket_wait);
+	init_waitqueue_head(&c->gc_wait);
 	sema_init(&c->uuid_write_mutex, 1);
 
 	spin_lock_init(&c->btree_gc_time.lock);
@@ -1550,6 +1551,7 @@ static void run_cache_set(struct cache_set *c)
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
+	set_gc_sectors(c);
 
 	if (CACHE_SYNC(&c->sb)) {
 		LIST_HEAD(journal);
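
For readers unfamiliar with the kthread/waitqueue idiom this patch switches to, below is a minimal, self-contained sketch of the same pattern outside of bcache: the worker thread sleeps on a wait_queue_head_t via wait_event_interruptible() and is woken with wake_up(), rather than parking itself with set_current_state()/schedule() and being woken with wake_up_process(). All demo_* names are illustrative only and do not exist in the bcache code.

/*
 * Sketch of a kthread that sleeps on a waitqueue until work is pending.
 * The condition is re-checked after every wakeup, so spurious wakeups
 * and racing wake_up() calls are harmless.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/err.h>

struct demo_state {
	wait_queue_head_t	wait;
	atomic_t		work_pending;
	struct task_struct	*thread;
};

static bool demo_should_run(struct demo_state *s)
{
	return atomic_read(&s->work_pending) > 0;
}

static int demo_thread(void *arg)
{
	struct demo_state *s = arg;

	while (1) {
		/* Sleep until woken, then re-evaluate the condition. */
		wait_event_interruptible(s->wait,
			kthread_should_stop() || demo_should_run(s));

		if (kthread_should_stop())
			break;

		atomic_set(&s->work_pending, 0);
		/* ... do the actual work here ... */
	}

	return 0;
}

static int demo_start(struct demo_state *s)
{
	init_waitqueue_head(&s->wait);
	atomic_set(&s->work_pending, 0);

	/* kthread_run() creates and immediately wakes the thread. */
	s->thread = kthread_run(demo_thread, s, "demo_thread");
	return IS_ERR(s->thread) ? PTR_ERR(s->thread) : 0;
}

/* Producer side: record that work exists, then wake the sleeping thread. */
static void demo_kick(struct demo_state *s)
{
	atomic_inc(&s->work_pending);
	wake_up(&s->wait);
}

/* Teardown: kthread_stop() makes kthread_should_stop() return true and
 * waits for demo_thread() to exit. */
static void demo_stop(struct demo_state *s)
{
	if (!IS_ERR_OR_NULL(s->thread))
		kthread_stop(s->thread);
}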