Diffstat (limited to 'drivers/md/bcache/movinggc.c')
 drivers/md/bcache/movinggc.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 	unsigned i;
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct cache *ca = PTR_CACHE(c, k, i);
 		struct bucket *g = PTR_BUCKET(c, k, i);
 
-		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+		if (GC_MOVE(g))
 			return true;
 	}
 
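The old predicate compared each bucket's GC_SECTORS_USED() against a per-cache threshold; the new one just tests a per-bucket GC_MOVE flag that bch_moving_gc() sets below. In bcache such flags are packed into struct bucket, presumably via its BITMASK() accessor macros. The following is a minimal userspace sketch of that one-bit-flag pattern; the field name and bit position are assumptions for illustration, not bcache's actual layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: one "move me" bit packed into a 16-bit mark field. */
struct bucket {
	uint16_t gc_mark;
};

#define GC_MOVE_SHIFT	15
#define GC_MOVE_MASK	(1u << GC_MOVE_SHIFT)

static unsigned gc_move(const struct bucket *b)
{
	return (b->gc_mark & GC_MOVE_MASK) >> GC_MOVE_SHIFT;
}

static void set_gc_move(struct bucket *b, unsigned v)
{
	b->gc_mark &= ~GC_MOVE_MASK;
	b->gc_mark |= (v ? 1u : 0u) << GC_MOVE_SHIFT;
}

int main(void)
{
	struct bucket b = { .gc_mark = 3 };

	set_gc_move(&b, 1);
	printf("move=%u, other bits intact=%u\n",
	       gc_move(&b), b.gc_mark & ~GC_MOVE_MASK);
	return 0;
}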
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
 
 static void read_moving_endio(struct bio *bio, int error)
 {
+	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
 	if (error)
 		io->op.error = error;
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(io->op.c, &b->key, 0)) {
+		io->op.error = -EINTR;
+	}
 
 	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
 }
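read_moving_endio() now also recovers the enclosing struct bbio from the bio so it can check the key: a clean read whose pointer went stale mid-flight is failed with -EINTR instead of being written back. Both lookups use the kernel's container_of(), which subtracts a member's offset from the member's address. A self-contained sketch of that pattern; the struct layouts here are simplified stand-ins, not bcache's real definitions:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bio { void *bi_private; };

struct bbio {
	int key_stale;		/* stand-in for the !KEY_DIRTY && ptr_stale test */
	struct bio bio;		/* bio embedded at the end of the bbio */
};

struct closure { int refcount; };

struct moving_io {
	struct closure cl;	/* bio->bi_private points at this member */
	int error;
};

static void read_moving_endio(struct bio *bio, int error)
{
	/* Step from the embedded members back to their containers. */
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of((struct closure *)bio->bi_private,
					    struct moving_io, cl);

	if (error)
		io->error = error;
	else if (b->key_stale)
		io->error = -EINTR;	/* data moved under us; don't rewrite it */
}

int main(void)
{
	struct moving_io io = { .error = 0 };
	struct bbio b = { .key_stale = 1 };

	b.bio.bi_private = &io.cl;
	read_moving_endio(&b.bio, 0);
	printf("io.error = %d (-EINTR = %d)\n", io.error, -EINTR);
	return 0;
}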
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
 		if (!w)
 			break;
 
+		if (ptr_stale(c, &w->key, 0)) {
+			bch_keybuf_del(&c->moving_gc_keys, w);
+			continue;
+		}
+
 		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
 			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
 			     GFP_KERNEL);
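The kzalloc() above sizes one allocation for the moving_io header plus a trailing bio_vec array: one vec per page of data, rounding the key's sector count up to whole pages. The arithmetic, with DIV_ROUND_UP written out as the kernel defines it and a 4 KiB page assumed:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SECTORS		8	/* 4096-byte page / 512-byte sector */

int main(void)
{
	unsigned sizes[] = { 1, 8, 9, 64 };	/* example KEY_SIZE values */
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("%2u sectors -> %u bio_vec(s)\n",
		       sizes[i], DIV_ROUND_UP(sizes[i], PAGE_SECTORS));
	return 0;
}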
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
 
 static unsigned bucket_heap_top(struct cache *ca)
 {
-	return GC_SECTORS_USED(heap_peek(&ca->heap));
+	struct bucket *b;
+	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
 }
 
 void bch_moving_gc(struct cache_set *c)
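The one-line rewrite of bucket_heap_top() fixes a NULL dereference: heap_peek() yields NULL when the heap is empty, and the old code fed that straight into GC_SECTORS_USED(). A sketch of the guarded pattern against a toy heap; the heap layout is an assumption, not bcache's:

#include <stdio.h>

struct bucket { unsigned sectors_used; };

struct heap {
	unsigned used;
	struct bucket **data;
};

/* Top of the heap, or NULL when nothing is queued. */
static struct bucket *heap_peek(struct heap *h)
{
	return h->used ? h->data[0] : NULL;
}

static unsigned bucket_heap_top(struct heap *h)
{
	struct bucket *b;

	return (b = heap_peek(h)) ? b->sectors_used : 0;
}

int main(void)
{
	struct bucket top = { .sectors_used = 42 };
	struct bucket *slot[] = { &top };
	struct heap full = { .used = 1, .data = slot };
	struct heap empty = { .used = 0, .data = NULL };

	printf("full: %u, empty: %u\n",
	       bucket_heap_top(&full), bucket_heap_top(&empty));
	return 0;
}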
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
 			sectors_to_move -= GC_SECTORS_USED(b);
 		}
 
-		ca->gc_move_threshold = bucket_heap_top(ca);
-
-		pr_debug("threshold %u", ca->gc_move_threshold);
+		while (heap_pop(&ca->heap, b, bucket_cmp))
+			SET_GC_MOVE(b, 1);
 	}
 
 	mutex_unlock(&c->bucket_lock);
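Instead of remembering a threshold and re-deriving "should move" per pointer, bch_moving_gc() now drains whatever survives the budgeting loop and tags each bucket directly; moving_pred() above then just reads the flag back. A toy model of the two-phase drain, with a plain sorted array standing in for bcache's heap and made-up sector counts:

#include <stdbool.h>
#include <stdio.h>

struct bucket {
	unsigned sectors_used;
	bool gc_move;
};

/* Stand-in for heap_pop(): hand out the fullest remaining bucket. */
static struct bucket *heap_pop(struct bucket **heap, unsigned *used)
{
	return *used ? heap[--*used] : NULL;
}

int main(void)
{
	struct bucket bs[] = { {100, 0}, {300, 0}, {700, 0}, {900, 0} };
	/* Ascending, so popping from the back yields the fullest first. */
	struct bucket *heap[] = { &bs[0], &bs[1], &bs[2], &bs[3] };
	unsigned used = 4, sectors_to_move = 2000, reserve = 600, i;
	struct bucket *b;

	/* Phase 1: shed the most expensive buckets until the rest fits. */
	while (sectors_to_move > reserve) {
		b = heap_pop(heap, &used);
		sectors_to_move -= b->sectors_used;
	}

	/* Phase 2: everything left is cheap enough to copy; mark it. */
	while ((b = heap_pop(heap, &used)))
		b->gc_move = true;

	for (i = 0; i < 4; i++)
		printf("bucket %u: used=%u move=%d\n",
		       i, bs[i].sectors_used, bs[i].gc_move);
	return 0;
}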