commit 5c25c4fc74af40657606dd01df27cc5eb9efb26c
tree bb8382641c55cf0104c0bb547ac864069ff80482
parent 99a27d59bd7b2ce1a82a4e826e8e7881f4d4954d
Author:     Tang Junhui <tang.junhui@zte.com.cn>
AuthorDate: 2018-07-26 00:17:34 -0400
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: 2018-07-27 11:15:46 -0400
bcache: finish incremental GC
In the GC thread we record the latest GC key in gc_done, which is meant to support incremental GC, but the current code never takes advantage of it. While GC runs, front side I/O is blocked until GC finishes, which can take a long time when there are many btree nodes.

This patch implements incremental GC. The main idea is: when front side I/Os are in flight, after GC has processed a batch of nodes (100), we stop GC, release the btree node lock, and let the front side I/Os proceed for a while (100 ms), then go back to GC again.

With this patch, I/Os are no longer blocked for the whole duration of GC, and the problem of I/O dropping to zero during GC no longer shows up.

Patch v2: Rename some variables and macros as Coly suggested.

Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
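[Editor's note: the following is a minimal user-space sketch of the incremental-GC pattern this patch describes, NOT the bcache code itself. The names gc_state, do_one_node and gc_run_batch are hypothetical; MIN_GC_NODES, GC_SLEEP_MS and search_inflight mirror the identifiers the patch introduces.]

	/* Sketch: GC a batch of nodes, yield to foreground I/O, resume. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define MIN_GC_NODES 100	/* nodes to GC before considering a yield */
	#define GC_SLEEP_MS  100	/* how long to let foreground I/O run */

	static atomic_int search_inflight;	/* foreground requests in flight */

	struct gc_state {
		size_t nodes;		/* nodes processed so far */
		size_t nodes_pre;	/* value of ->nodes at the last yield */
	};

	/* Placeholder for the real per-node GC work; returns true when done. */
	static bool do_one_node(struct gc_state *gc, size_t total)
	{
		gc->nodes++;
		return gc->nodes >= total;
	}

	/* Returns false (the kernel code returns -EAGAIN) to request a yield. */
	static bool gc_run_batch(struct gc_state *gc, size_t total)
	{
		while (!do_one_node(gc, total)) {
			if (atomic_load(&search_inflight) &&
			    gc->nodes >= gc->nodes_pre + MIN_GC_NODES) {
				gc->nodes_pre = gc->nodes;
				return false;	/* yield to foreground I/O */
			}
		}
		return true;
	}

	int main(void)
	{
		struct gc_state gc = { 0 };
		struct timespec ts = { 0, GC_SLEEP_MS * 1000000L };

		atomic_store(&search_inflight, 1);	/* pretend I/O is running */
		while (!gc_run_batch(&gc, 350)) {
			printf("yield after %zu nodes\n", gc.nodes);
			nanosleep(&ts, NULL);	/* kernel: schedule_timeout_interruptible() */
		}
		printf("GC finished: %zu nodes\n", gc.nodes);
		return 0;
	}

Running it prints a yield after every 100 nodes (100, 200, 300) before finishing at 350, which is exactly the stop/sleep/resume cadence the patch gives the GC thread.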
 drivers/md/bcache/bcache.h  |  5 +++++
 drivers/md/bcache/btree.c   | 14 +++++++++++++-
 drivers/md/bcache/request.c |  3 +++
 3 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 3226d38bf859..872ef4d67711 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -474,6 +474,7 @@ struct cache {
 
 struct gc_stat {
 	size_t			nodes;
+	size_t			nodes_pre;
 	size_t			key_bytes;
 
 	size_t			nkeys;
@@ -604,6 +605,10 @@ struct cache_set {
 	 */
 	atomic_t		rescale;
 	/*
+	 * used for GC, identify if any front side I/Os is inflight
+	 */
+	atomic_t		search_inflight;
+	/*
 	 * When we invalidate buckets, we use both the priority and the amount
 	 * of good data to determine which buckets to reuse first - to weight
 	 * those together consistently we keep track of the smallest nonzero
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 547c9eedc2f4..b4407ba12667 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -90,6 +90,8 @@
 
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
+#define MIN_GC_NODES		100
+#define GC_SLEEP_MS		100
 
 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
 
@@ -1585,6 +1587,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
 		r->b = NULL;
 
+		if (atomic_read(&b->c->search_inflight) &&
+		    gc->nodes >= gc->nodes_pre + MIN_GC_NODES) {
+			gc->nodes_pre = gc->nodes;
+			ret = -EAGAIN;
+			break;
+		}
+
 		if (need_resched()) {
 			ret = -EAGAIN;
 			break;
@@ -1753,7 +1762,10 @@ static void bch_btree_gc(struct cache_set *c)
 		closure_sync(&writes);
 		cond_resched();
 
-		if (ret && ret != -EAGAIN)
+		if (ret == -EAGAIN)
+			schedule_timeout_interruptible(msecs_to_jiffies
+						       (GC_SLEEP_MS));
+		else if (ret)
 			pr_warn("gc failed!");
 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 97707b0c54ce..43af905920f5 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -701,6 +701,8 @@ static void search_free(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
+	atomic_dec(&s->d->c->search_inflight);
+
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
@@ -718,6 +720,7 @@ static inline struct search *search_alloc(struct bio *bio,
 
 	closure_init(&s->cl, NULL);
 	do_bio_hook(s, bio, request_endio);
+	atomic_inc(&d->c->search_inflight);
 
 	s->orig_bio = bio;
 	s->cache_miss = NULL;
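[Editor's note: the request.c hunks above pair an atomic_inc at search allocation with an atomic_dec at search free, which is what makes the GC-side atomic_read meaningful. Below is a minimal user-space sketch of that lifetime rule, not the kernel code; the struct search here is a stand-in.]

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	static atomic_int search_inflight;	/* "is any foreground I/O running?" */

	struct search { int dummy; };

	static struct search *search_alloc(void)
	{
		struct search *s = calloc(1, sizeof(*s));
		if (s)
			atomic_fetch_add(&search_inflight, 1);	/* kernel: atomic_inc() */
		return s;
	}

	static void search_free(struct search *s)
	{
		atomic_fetch_sub(&search_inflight, 1);		/* kernel: atomic_dec() */
		free(s);
	}

	int main(void)
	{
		struct search *s = search_alloc();
		printf("inflight: %d\n", atomic_load(&search_inflight));	/* 1 */
		search_free(s);
		printf("inflight: %d\n", atomic_load(&search_inflight));	/* 0 */
		return 0;
	}

Because the counter is only ever read as a boolean hint (nonzero means "yield soon"), a plain atomic counter with no locking is sufficient here; a stale read merely delays the yield by at most one batch.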