author    Nicholas Swenson <nks@daterainc.com>    2013-11-07 20:53:19 -0500
committer Kent Overstreet <kmo@daterainc.com>    2013-12-16 17:22:58 -0500
commit    981aa8c091e164ea51dd1e81b71a1f3852bbcceb (patch)
tree      63b14b1df54db25daa2ce46c8f42b5cff0ab1b89
parent    bee63f40cb5f5e8ab2abfbc85acde99cc0acd4b5 (diff)
bcache: bugfix - moving_gc now moves only correct buckets
Removed gc_move_threshold because picking buckets only by threshold could lead to moving extra buckets (i.e. if there are buckets at the threshold that aren't supposed to be moved due to space considerations).

This is replaced by a GC_MOVE bit in the gc_mark bitmask. Now only marked buckets get moved.

Signed-off-by: Nicholas Swenson <nks@daterainc.com>
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
-rw-r--r--    drivers/md/bcache/alloc.c       2
-rw-r--r--    drivers/md/bcache/bcache.h      6
-rw-r--r--    drivers/md/bcache/movinggc.c    8
3 files changed, 8 insertions(+), 8 deletions(-)
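For reference, after this patch the 16-bit gc_mark word carries three fields: GC_MARK in bits 0-1, GC_SECTORS_USED in bits 2-14 (narrowed from 14 to 13 bits), and the new GC_MOVE flag in bit 15, as the bcache.h hunk below defines via BITMASK(). The following is a minimal, self-contained sketch of how get/set accessors over that layout behave; the generic field_get/field_set helpers are purely illustrative stand-ins, not bcache's actual BITMASK macro.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct bucket: only the gc_mark bitfield. */
struct bucket {
	uint16_t gc_mark;
};

/* Generic accessors for a field of `bits` bits starting at `offset`.
 * bcache's BITMASK macro generates equivalent per-field helpers. */
static inline uint16_t field_get(uint16_t word, unsigned offset, unsigned bits)
{
	return (word >> offset) & ((1U << bits) - 1);
}

static inline uint16_t field_set(uint16_t word, unsigned offset, unsigned bits,
				 uint16_t val)
{
	uint16_t mask = ((1U << bits) - 1) << offset;

	return (word & ~mask) | ((val << offset) & mask);
}

/* Field layout after this patch: */
#define GC_MARK_OFFSET		0	/* 2 bits: reclaimable/dirty/metadata */
#define GC_MARK_BITS		2
#define GC_SECTORS_OFFSET	2	/* 13 bits: live sectors in the bucket */
#define GC_SECTORS_BITS		13
#define GC_MOVE_OFFSET		15	/* 1 bit: selected for moving GC */
#define GC_MOVE_BITS		1

int main(void)
{
	struct bucket b = { .gc_mark = 0 };

	b.gc_mark = field_set(b.gc_mark, GC_SECTORS_OFFSET, GC_SECTORS_BITS, 512);
	b.gc_mark = field_set(b.gc_mark, GC_MOVE_OFFSET, GC_MOVE_BITS, 1);

	printf("sectors used = %u, move = %u\n",
	       field_get(b.gc_mark, GC_SECTORS_OFFSET, GC_SECTORS_BITS),
	       field_get(b.gc_mark, GC_MOVE_OFFSET, GC_MOVE_BITS));
	return 0;
}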
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}
 
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..a7b1a7631ed2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
 	uint8_t		disk_gen;
 	uint8_t		last_gc; /* Most out of date gen in the btree */
 	uint8_t		gc_gen;
-	uint16_t	gc_mark;
+	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 };
 
 /*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
 #include "stats.h"
@@ -445,7 +446,6 @@ struct cache {
 	 * call prio_write() to keep gens from wrapping.
 	 */
 	uint8_t		need_save_prio;
-	unsigned	gc_move_threshold;
 
 	/*
 	 * If nonzero, we know we aren't going to find any buckets to invalidate
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 46c952379fab..30f347d4e609 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 	unsigned i;
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct cache *ca = PTR_CACHE(c, k, i);
 		struct bucket *g = PTR_BUCKET(c, k, i);
 
-		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+		if (GC_MOVE(g))
 			return true;
 	}
 
@@ -227,9 +226,8 @@ void bch_moving_gc(struct cache_set *c)
 			sectors_to_move -= GC_SECTORS_USED(b);
 		}
 
-		ca->gc_move_threshold = bucket_heap_top(ca);
-
-		pr_debug("threshold %u", ca->gc_move_threshold);
+		while (heap_pop(&ca->heap, b, bucket_cmp))
+			SET_GC_MOVE(b, 1);
 	}
 
 	mutex_unlock(&c->bucket_lock);
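Taken together, the movinggc.c changes turn bucket selection into a two-phase mark-and-check: after trimming its bucket heap to fit the free-space reserve, bch_moving_gc sets GC_MOVE on every bucket still left in the heap, and moving_pred later treats a key as movable only if one of the buckets it points into carries that bit. Below is a compressed sketch of that flow under simplified, hypothetical types; plain arrays and the helper names mark_for_move/should_move stand in for bcache's heap, key, and predicate machinery.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GC_MOVE_BIT	(1U << 15)	/* bit 15 of gc_mark, as in the patch */

struct bucket {
	uint16_t gc_mark;
};

/* Phase 1 (bch_moving_gc): every bucket remaining in the heap after the
 * reserve trim is tagged as a move candidate. */
static void mark_for_move(struct bucket **heap, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		heap[i]->gc_mark |= GC_MOVE_BIT;
}

/* Phase 2 (moving_pred): a key is worth moving iff any bucket it points
 * into was tagged above; `nr_ptrs` stands in for KEY_PTRS(k). */
static bool should_move(struct bucket **ptr_buckets, unsigned nr_ptrs)
{
	for (unsigned i = 0; i < nr_ptrs; i++)
		if (ptr_buckets[i]->gc_mark & GC_MOVE_BIT)
			return true;
	return false;
}

int main(void)
{
	struct bucket buckets[3] = { { 0 }, { 0 }, { 0 } };
	struct bucket *survivors[] = { &buckets[1] };	/* left in the heap */
	struct bucket *key_ptrs[] = { &buckets[0], &buckets[1] };

	mark_for_move(survivors, 1);
	printf("move key? %s\n", should_move(key_ptrs, 2) ? "yes" : "no");
	return 0;
}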