Diffstat (limited to 'drivers/md/bcache/movinggc.c')
-rw-r--r--	drivers/md/bcache/movinggc.c	254
1 file changed, 254 insertions, 0 deletions
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
new file mode 100644
index 000000000000..8589512c972e
--- /dev/null
+++ b/drivers/md/bcache/movinggc.c
@@ -0,0 +1,254 @@
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

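/*
 * State for one key being moved: the keybuf entry it came from, the
 * search/insert op used to rewrite it, and the bio that carries the data.
 */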
struct moving_io {
	struct keybuf_key	*w;
	struct search		s;
	struct bbio		bio;
};

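/*
 * Keybuf predicate: a key should be moved if any bucket it points into
 * has fewer live sectors than its cache's gc_move_threshold.
 */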
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

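/* Final cleanup once a moving_io's closure hierarchy has run to completion */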
static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	kfree(io);
}

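/*
 * Runs after the rewrite of a key has finished: free the bounce pages,
 * drop the key from moving_gc_keys, and wake read_moving() if it was
 * throttled on in_flight.
 */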
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);

	while (bv-- != bio->bi_io_vec)
		__free_page(bv->bv_page);

	pr_debug("%s %s", io->s.op.insert_collision
		 ? "collision moving" : "moved",
		 pkey(&io->w->key));

	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);

	atomic_dec_bug(&io->s.op.c->in_flight);
	closure_wake_up(&io->s.op.c->moving_gc_wait);

	closure_return_with_destructor(cl, moving_io_destructor);
}

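/* Read completion: record any error and complete through bch_bbio_endio() */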
static void read_moving_endio(struct bio *bio, int error)
{
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, s.cl);

	if (error)
		io->s.error = error;

	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
}

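/*
 * (Re)initialize the embedded bio to cover exactly the key being moved;
 * the same bio is used first for the read and then again for the write.
 */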
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->s.cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

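/*
 * Write the data back out and reinsert the key with BTREE_REPLACE, so the
 * index update only takes effect if the key hasn't changed while the move
 * was in flight (a collision is just logged in write_moving_finish()).
 */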
static void write_moving(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);

	if (!s->error) {
		trace_bcache_write_moving(&io->bio.bio);

		moving_init(io);

		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
		s->op.lock		= -1;
		s->op.write_prio	= 1;
		s->op.cache_bio		= &io->bio.bio;

		s->writeback		= KEY_DIRTY(&io->w->key);
		s->op.csum		= KEY_CSUM(&io->w->key);

		s->op.type = BTREE_REPLACE;
		bkey_copy(&s->op.replace, &io->w->key);

		closure_init(&s->op.cl, cl);
		bch_insert_data(&s->op.cl);
	}

	continue_at(cl, write_moving_finish, NULL);
}

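/* Issue the read for the data being moved; continues to write_moving() on bch_gc_wq */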
static void read_moving_submit(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);
	struct bio *bio = &io->bio.bio;

	trace_bcache_read_moving(bio);
	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, bch_gc_wq);
}

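/*
 * Main IO loop: scan moving_gc_keys for keys that moving_pred() flagged,
 * allocate a moving_io for each and kick off the read, keeping at most
 * 64 moves in flight at a time.
 */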
static void read_moving(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY);
		if (!w)
			break;

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->s.op.inode	= KEY_INODE(&w->key);
		io->s.op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		pr_debug("%s", pkey(&w->key));

		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);

		if (atomic_inc_return(&c->in_flight) >= 64) {
			closure_wait_event(&c->moving_gc_wait, cl,
					   atomic_read(&c->in_flight) < 64);
			continue_at(cl, read_moving, bch_gc_wq);
		}
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_return(cl);
}

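/* Heap helpers: order buckets by the number of live sectors they still hold */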
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}

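/*
 * Entry point, run from the btree gc closure: for each cache, collect the
 * most sparsely used buckets into ca->heap (dropping candidates until the
 * data left to move fits within the spare free buckets), set
 * gc_move_threshold from the top of the heap, then start read_moving()
 * to copy the live data out of those buckets.
 */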
void bch_moving_gc(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		closure_return(cl);

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	closure_init(&c->moving_gc, cl);
	read_moving(&c->moving_gc);

	closure_return(cl);
}

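/* Set up the cache set's moving GC keybuf with moving_pred() as its predicate */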
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys, moving_pred);
}