author	Kent Overstreet <kmo@daterainc.com>	2013-07-24 20:44:17 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-11 00:56:08 -0500
commit	c18536a72ddd7fe30d63e6c1500b5c930ac14594 (patch)
tree	0794a00a28c810326b76a36f599e8eee1932008a
parent	cc23196631fbcd1bc3eafedbb712413fdbf946a3 (diff)
bcache: Prune struct btree_op
Eventual goal is for struct btree_op to contain only what is necessary for
traversing the btree.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
-rw-r--r--	drivers/md/bcache/bset.c      |   1
-rw-r--r--	drivers/md/bcache/btree.c     |  32
-rw-r--r--	drivers/md/bcache/btree.h     |  21
-rw-r--r--	drivers/md/bcache/journal.c   |  32
-rw-r--r--	drivers/md/bcache/journal.h   |   9
-rw-r--r--	drivers/md/bcache/movinggc.c  |  18
-rw-r--r--	drivers/md/bcache/request.c   | 177
-rw-r--r--	drivers/md/bcache/request.h   |  14
-rw-r--r--	drivers/md/bcache/stats.c     |   8
-rw-r--r--	drivers/md/bcache/super.c     |  21
-rw-r--r--	drivers/md/bcache/writeback.c |  17
11 files changed, 179 insertions(+), 171 deletions(-)
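
The substance of the change, condensed from the hunks below (abridged sketch, not a compilable excerpt): struct btree_op keeps only btree-traversal state, the per-request fields move into struct search in request.h, and the journal pin that used to be stashed in op->journal is now passed explicitly as an atomic_t * argument:

/* btree.h: what remains in struct btree_op after this patch (abridged) */
struct btree_op {
	struct closure		cl;
	short			lock;	/* btree level at which we start taking write locks */
	/* ... op type, insert_collision:1, BKEY_PADDED(replace) ... */
};

/* request.h: per-request state now carried by struct search (abridged) */
struct search {
	/* ... */
	struct cache_set	*c;
	struct bio		*cache_bio;	/* bio to be inserted into the cache */
	unsigned		inode;
	unsigned		csum:1, bypass:1, flush_journal:1, insert_data_done:1;
	uint16_t		write_prio;
	/* ... */
};

/* the journal refcount becomes an explicit argument instead of op->journal */
int bch_btree_insert(struct btree_op *, struct cache_set *,
		     struct keylist *, atomic_t *);
int bch_btree_check(struct cache_set *);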
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 14c2a23d3884..fae5b7b3f5ab 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1197,7 +1197,6 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 
 	memset(&t, 0, sizeof(struct bset_stats));
 	bch_btree_op_init_stack(&t.op);
-	t.op.c = c;
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 3949673cb1b0..5cb59c313dc3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -503,7 +503,7 @@ static void btree_node_write_work(struct work_struct *w)
 	rw_unlock(true, b);
 }
 
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct btree_write *w = btree_current_write(b);
@@ -516,15 +516,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
 
 	set_btree_node_dirty(b);
 
-	if (op->journal) {
+	if (journal_ref) {
 		if (w->journal &&
-		    journal_pin_cmp(b->c, w, op)) {
+		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
 			atomic_dec_bug(w->journal);
 			w->journal = NULL;
 		}
 
 		if (!w->journal) {
-			w->journal = op->journal;
+			w->journal = journal_ref;
 			atomic_inc(w->journal);
 		}
 	}
@@ -1663,13 +1663,16 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 	return 0;
 }
 
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
 {
 	int ret = -ENOMEM;
 	unsigned i;
 	unsigned long *seen[MAX_CACHES_PER_SET];
+	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1681,7 +1684,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
 		memset(seen[i], 0xFF, n);
 	}
 
-	ret = btree_root(check_recurse, c, op, seen);
+	ret = btree_root(check_recurse, c, &op, seen);
 err:
 	for (i = 0; i < MAX_CACHES_PER_SET; i++)
 		kfree(seen[i]);
@@ -2091,7 +2094,8 @@ err:
 }
 
 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
-				 struct keylist *insert_keys)
+				 struct keylist *insert_keys,
+				 atomic_t *journal_ref)
 {
 	int ret = 0;
 	struct keylist split_keys;
@@ -2123,7 +2127,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 
 	if (bch_btree_insert_keys(b, op, insert_keys)) {
 		if (!b->level)
-			bch_btree_leaf_dirty(b, op);
+			bch_btree_leaf_dirty(b, journal_ref);
 		else
 			bch_btree_node_write(b, &op->cl);
 	}
@@ -2162,7 +2166,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 
 	BUG_ON(op->type != BTREE_INSERT);
 
-	ret = bch_btree_insert_node(b, op, &insert);
+	ret = bch_btree_insert_node(b, op, &insert, NULL);
 
 	BUG_ON(!ret && !bch_keylist_empty(&insert));
 out:
@@ -2172,7 +2176,7 @@ out:
 }
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *keys)
+				    struct keylist *keys, atomic_t *journal_ref)
 {
 	if (bch_keylist_empty(keys))
 		return 0;
@@ -2189,14 +2193,14 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
 			return -EIO;
 		}
 
-		return btree(insert_recurse, k, b, op, keys);
+		return btree(insert_recurse, k, b, op, keys, journal_ref);
 	} else {
-		return bch_btree_insert_node(b, op, keys);
+		return bch_btree_insert_node(b, op, keys, journal_ref);
 	}
 }
 
 int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-		     struct keylist *keys)
+		     struct keylist *keys, atomic_t *journal_ref)
 {
 	int ret = 0;
 
@@ -2210,7 +2214,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 
 	while (!bch_keylist_empty(keys)) {
 		op->lock = 0;
-		ret = btree_root(insert_recurse, c, op, keys);
+		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
 			ret = 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 60dadd722ace..3f820b67150c 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -238,17 +238,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	struct closure		cl;
-	struct cache_set	*c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t		*journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio		*cache_bio;
-
-	unsigned		inode;
-
-	uint16_t		write_prio;
 
 	/* Btree level at which we start taking write locks */
 	short			lock;
@@ -259,11 +248,6 @@ struct btree_op {
 		BTREE_REPLACE
 	} type:8;
 
-	unsigned		csum:1;
-	unsigned		bypass:1;
-	unsigned		flush_journal:1;
-
-	unsigned		insert_data_done:1;
 	unsigned		insert_collision:1;
 
 	BKEY_PADDED(replace);
@@ -303,12 +287,13 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
 			       struct bkey *);
-int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
+int bch_btree_insert(struct btree_op *, struct cache_set *,
+		     struct keylist *, atomic_t *);
 
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
 void bch_moving_gc(struct cache_set *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
+int bch_btree_check(struct cache_set *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6f4daf031410..725c8eb9a62a 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -30,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
 }
 
 static int journal_read_bucket(struct cache *ca, struct list_head *list,
-			       struct btree_op *op, unsigned bucket_index)
+			       unsigned bucket_index)
 {
 	struct journal_device *ja = &ca->journal;
 	struct bio *bio = &ja->bio;
 
 	struct journal_replay *i;
 	struct jset *j, *data = ca->set->journal.w[0].data;
+	struct closure cl;
 	unsigned len, left, offset = 0;
 	int ret = 0;
 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
 
+	closure_init_stack(&cl);
+
 	pr_debug("reading %llu", (uint64_t) bucket);
 
 	while (offset < ca->sb.bucket_size) {
@@ -54,11 +57,11 @@ reread: left = ca->sb.bucket_size - offset;
 		bio->bi_size	= len << 9;
 
 		bio->bi_end_io	= journal_read_endio;
-		bio->bi_private = &op->cl;
+		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &op->cl, ca);
-		closure_sync(&op->cl);
+		closure_bio_submit(bio, &cl, ca);
+		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
 		 * journal entries that overlap bucket boundaries; this means
@@ -128,12 +131,11 @@ next_set:
 	return ret;
 }
 
-int bch_journal_read(struct cache_set *c, struct list_head *list,
-		     struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
 {
 #define read_bucket(b)						\
 	({							\
-		int ret = journal_read_bucket(ca, list, op, b);	\
+		int ret = journal_read_bucket(ca, list, b);	\
 		__set_bit(b, bitmap);				\
 		if (ret < 0)					\
 			return ret;				\
@@ -291,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 	}
 }
 
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
-		       struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
 {
 	int ret = 0, keys = 0, entries = 0;
 	struct bkey *k;
@@ -301,8 +302,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 
 	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
 	struct keylist keylist;
+	struct btree_op op;
 
 	bch_keylist_init(&keylist);
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -319,9 +323,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 			bkey_copy(keylist.top, k);
 			bch_keylist_push(&keylist);
 
-			op->journal = i->pin;
-
-			ret = bch_btree_insert(op, s, &keylist);
+			ret = bch_btree_insert(&op, s, &keylist, i->pin);
 			if (ret)
 				goto err;
 
@@ -346,7 +348,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		kfree(i);
 	}
 err:
-	closure_sync(&op->cl);
+	closure_sync(&op.cl);
 	return ret;
 }
 
@@ -368,8 +370,8 @@ retry:
 		if (!best)
 			best = b;
 		else if (journal_pin_cmp(c,
-					 btree_current_write(best),
-					 btree_current_write(b))) {
+					 btree_current_write(best)->journal,
+					 btree_current_write(b)->journal)) {
 			best = b;
 		}
 
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 7045e6fd2d5a..5e9edb9ef376 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -189,8 +189,7 @@ struct journal_device {
 };
 
 #define journal_pin_cmp(c, l, r)				\
-	(fifo_idx(&(c)->journal.pin, (l)->journal) >		\
-	 fifo_idx(&(c)->journal.pin, (r)->journal))
+	(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
 
 #define JOURNAL_PIN	20000
 
@@ -206,10 +205,8 @@ atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
 void bch_journal_next(struct journal *);
 void bch_journal_mark(struct cache_set *, struct list_head *);
 void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
-		     struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
-		       struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
 
 void bch_journal_free(struct cache_set *);
 int bch_journal_alloc(struct cache_set *);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 6ba050456ec8..80e30d77221e 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -55,9 +55,9 @@ static void write_moving_finish(struct closure *cl)
 	if (io->s.op.insert_collision)
 		trace_bcache_gc_copy_collision(&io->w->key);
 
-	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+	bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
 
-	up(&io->s.op.c->moving_in_flight);
+	up(&io->s.c->moving_in_flight);
 
 	closure_return_with_destructor(cl, moving_io_destructor);
 }
@@ -70,7 +70,7 @@ static void read_moving_endio(struct bio *bio, int error)
 	if (error)
 		io->s.error = error;
 
-	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+	bch_bbio_endio(io->s.c, bio, error, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -99,11 +99,11 @@ static void write_moving(struct closure *cl)
 
 	io->bio.bio.bi_sector	= KEY_START(&io->w->key);
 	s->op.lock		= -1;
-	s->op.write_prio	= 1;
-	s->op.cache_bio		= &io->bio.bio;
+	s->write_prio		= 1;
+	s->cache_bio		= &io->bio.bio;
 
 	s->writeback		= KEY_DIRTY(&io->w->key);
-	s->op.csum		= KEY_CSUM(&io->w->key);
+	s->csum			= KEY_CSUM(&io->w->key);
 
 	s->op.type = BTREE_REPLACE;
 	bkey_copy(&s->op.replace, &io->w->key);
@@ -121,7 +121,7 @@ static void read_moving_submit(struct closure *cl)
 	struct moving_io *io = container_of(s, struct moving_io, s);
 	struct bio *bio = &io->bio.bio;
 
-	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+	bch_submit_bbio(bio, s->c, &io->w->key, 0);
 
 	continue_at(cl, write_moving, system_wq);
 }
@@ -151,8 +151,8 @@ static void read_moving(struct cache_set *c)
 
 		w->private	= io;
 		io->w		= w;
-		io->s.op.inode	= KEY_INODE(&w->key);
-		io->s.op.c	= c;
+		io->s.inode	= KEY_INODE(&w->key);
+		io->s.c		= c;
 
 		moving_init(io);
 		bio = &io->bio.bio;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index de3fc76ffcfc..818e2e39e71f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -217,6 +217,7 @@ static void bch_data_insert_keys(struct closure *cl)
 {
 	struct btree_op *op = container_of(cl, struct btree_op, cl);
 	struct search *s = container_of(op, struct search, op);
+	atomic_t *journal_ref = NULL;
 
 	/*
 	 * If we're looping, might already be waiting on
@@ -231,20 +232,19 @@ static void bch_data_insert_keys(struct closure *cl)
 #endif
 
 	if (s->write)
-		op->journal = bch_journal(op->c, &s->insert_keys,
-					  op->flush_journal
+		journal_ref = bch_journal(s->c, &s->insert_keys,
+					  s->flush_journal
 					  ? &s->cl : NULL);
 
-	if (bch_btree_insert(op, op->c, &s->insert_keys)) {
+	if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
 		s->error		= -ENOMEM;
-		op->insert_data_done	= true;
+		s->insert_data_done	= true;
 	}
 
-	if (op->journal)
-		atomic_dec_bug(op->journal);
-	op->journal = NULL;
+	if (journal_ref)
+		atomic_dec_bug(journal_ref);
 
-	if (!op->insert_data_done)
+	if (!s->insert_data_done)
 		continue_at(cl, bch_data_insert_start, bcache_wq);
 
 	bch_keylist_free(&s->insert_keys);
@@ -347,7 +347,7 @@ found:
 static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 			      struct search *s)
 {
-	struct cache_set *c = s->op.c;
+	struct cache_set *c = s->c;
 	struct open_bucket *b;
 	BKEY_PADDED(key) alloc;
 	unsigned i;
@@ -363,7 +363,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 	spin_lock(&c->data_bucket_lock);
 
 	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
-		unsigned watermark = s->op.write_prio
+		unsigned watermark = s->write_prio
 			? WATERMARK_MOVINGGC
 			: WATERMARK_NONE;
 
@@ -435,7 +435,7 @@ static void bch_data_invalidate(struct closure *cl)
 {
 	struct btree_op *op = container_of(cl, struct btree_op, cl);
 	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio;
+	struct bio *bio = s->cache_bio;
 
 	pr_debug("invalidating %i sectors from %llu",
 		 bio_sectors(bio), (uint64_t) bio->bi_sector);
@@ -443,17 +443,17 @@ static void bch_data_invalidate(struct closure *cl)
 	while (bio_sectors(bio)) {
 		unsigned len = min(bio_sectors(bio), 1U << 14);
 
-		if (bch_keylist_realloc(&s->insert_keys, 0, op->c))
+		if (bch_keylist_realloc(&s->insert_keys, 0, s->c))
 			goto out;
 
 		bio->bi_sector	+= len;
 		bio->bi_size	-= len << 9;
 
 		bch_keylist_add(&s->insert_keys,
-				&KEY(op->inode, bio->bi_sector, len));
+				&KEY(s->inode, bio->bi_sector, len));
 	}
 
 	s->insert_data_done = true;
 	bio_put(bio);
 out:
 	continue_at(cl, bch_data_insert_keys, bcache_wq);
@@ -506,21 +506,21 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 		set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, error, "writing data to cache");
+	bch_bbio_endio(s->c, bio, error, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
 {
 	struct btree_op *op = container_of(cl, struct btree_op, cl);
 	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio, *n;
+	struct bio *bio = s->cache_bio, *n;
 
-	if (op->bypass)
+	if (s->bypass)
 		return bch_data_invalidate(cl);
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
-		wake_up_gc(op->c);
+	if (atomic_sub_return(bio_sectors(bio), &s->c->sectors_to_gc) < 0) {
+		set_gc_sectors(s->c);
+		wake_up_gc(s->c);
 	}
 
 	/*
@@ -533,17 +533,17 @@ static void bch_data_insert_start(struct closure *cl)
 		unsigned i;
 		struct bkey *k;
 		struct bio_set *split = s->d
-			? s->d->bio_split : op->c->bio_split;
+			? s->d->bio_split : s->c->bio_split;
 
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&s->insert_keys,
-					1 + (op->csum ? 1 : 0),
-					op->c))
+					1 + (s->csum ? 1 : 0),
+					s->c))
 			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
 		k = s->insert_keys.top;
 		bkey_init(k);
-		SET_KEY_INODE(k, op->inode);
+		SET_KEY_INODE(k, s->inode);
 		SET_KEY_OFFSET(k, bio->bi_sector);
 
 		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
@@ -558,11 +558,11 @@ static void bch_data_insert_start(struct closure *cl)
 			SET_KEY_DIRTY(k, true);
 
 			for (i = 0; i < KEY_PTRS(k); i++)
-				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
+				SET_GC_MARK(PTR_BUCKET(s->c, k, i),
 					    GC_MARK_DIRTY);
 		}
 
-		SET_KEY_CSUM(k, op->csum);
+		SET_KEY_CSUM(k, s->csum);
 		if (KEY_CSUM(k))
 			bio_csum(n, k);
 
@@ -570,10 +570,10 @@ static void bch_data_insert_start(struct closure *cl)
 		bch_keylist_push(&s->insert_keys);
 
 		n->bi_rw |= REQ_WRITE;
-		bch_submit_bbio(n, op->c, k, 0);
+		bch_submit_bbio(n, s->c, k, 0);
 	} while (n != bio);
 
-	op->insert_data_done = true;
+	s->insert_data_done = true;
 	continue_at(cl, bch_data_insert_keys, bcache_wq);
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
@@ -592,14 +592,14 @@ err:
 		 * we wait for buckets to be freed up, so just invalidate the
 		 * rest of the write.
 		 */
-		op->bypass = true;
+		s->bypass = true;
 		return bch_data_invalidate(cl);
 	} else {
 		/*
 		 * From a cache miss, we can just insert the keys for the data
 		 * we have written or bail out if we didn't do anything.
 		 */
-		op->insert_data_done = true;
+		s->insert_data_done = true;
 		bio_put(bio);
 
 		if (!bch_keylist_empty(&s->insert_keys))
@@ -622,11 +622,11 @@ err:
  * data is written it calls bch_journal, and after the keys have been added to
  * the next journal write they're inserted into the btree.
  *
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
  * and op->inode is used for the key inode.
  *
- * If op->bypass is true, instead of inserting the data it invalidates the
- * region of the cache represented by op->cache_bio and op->inode.
+ * If s->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by s->cache_bio and op->inode.
  */
 void bch_data_insert(struct closure *cl)
 {
@@ -634,7 +634,7 @@ void bch_data_insert(struct closure *cl)
 	struct search *s = container_of(op, struct search, op);
 
 	bch_keylist_init(&s->insert_keys);
-	bio_get(op->cache_bio);
+	bio_get(s->cache_bio);
 	bch_data_insert_start(cl);
 }
 
@@ -655,12 +655,12 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 
 	if (error)
 		s->error = error;
-	else if (ptr_stale(s->op.c, &b->key, 0)) {
-		atomic_long_inc(&s->op.c->cache_read_races);
+	else if (ptr_stale(s->c, &b->key, 0)) {
+		atomic_long_inc(&s->c->cache_read_races);
 		s->error = -EINTR;
 	}
 
-	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+	bch_bbio_endio(s->c, bio, error, "reading from cache");
 }
 
 /*
@@ -674,13 +674,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(op->inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->inode, bio->bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
-	if (KEY_INODE(k) != s->op.inode ||
+	if (KEY_INODE(k) != s->inode ||
 	    KEY_START(k) > bio->bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
-		unsigned sectors = KEY_INODE(k) == s->op.inode
+		unsigned sectors = KEY_INODE(k) == s->inode
 			? min_t(uint64_t, INT_MAX,
 				KEY_START(k) - bio->bi_sector)
 			: INT_MAX;
@@ -708,8 +708,8 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->op.inode, n->bi_sector, 0), bio_key);
-	bch_cut_back(&KEY(s->op.inode, bio_end_sector(n), 0), bio_key);
+	bch_cut_front(&KEY(s->inode, n->bi_sector, 0), bio_key);
+	bch_cut_back(&KEY(s->inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io	= bch_cache_read_endio;
 	n->bi_private	= &s->cl;
@@ -735,8 +735,8 @@ static void cache_lookup(struct closure *cl)
 	struct search *s = container_of(op, struct search, op);
 	struct bio *bio = &s->bio.bio;
 
-	int ret = bch_btree_map_keys(op, op->c,
-				     &KEY(op->inode, bio->bi_sector, 0),
+	int ret = bch_btree_map_keys(op, s->c,
+				     &KEY(s->inode, bio->bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
@@ -793,8 +793,8 @@ static void search_free(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	bio_complete(s);
 
-	if (s->op.cache_bio)
-		bio_put(s->op.cache_bio);
+	if (s->cache_bio)
+		bio_put(s->cache_bio);
 
 	if (s->unaligned_bvec)
 		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
@@ -813,14 +813,14 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 
 	__closure_init(&s->cl, NULL);
 
-	s->op.inode		= d->id;
-	s->op.c			= d->c;
+	s->inode		= d->id;
+	s->c			= d->c;
 	s->d			= d;
 	s->op.lock		= -1;
 	s->task			= current;
 	s->orig_bio		= bio;
 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->recoverable		= 1;
 	s->start_time		= jiffies;
 	do_bio_hook(s);
@@ -891,7 +891,7 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 
 static bool check_should_bypass(struct cached_dev *dc, struct search *s)
 {
-	struct cache_set *c = s->op.c;
+	struct cache_set *c = s->c;
 	struct bio *bio = &s->bio.bio;
 	unsigned mode = cache_mode(dc, bio);
 	unsigned sectors, congested = bch_get_congested(c);
@@ -985,11 +985,11 @@ static void cached_dev_cache_miss_done(struct closure *cl)
 	if (s->op.insert_collision)
 		bch_mark_cache_miss_collision(s);
 
-	if (s->op.cache_bio) {
+	if (s->cache_bio) {
 		int i;
 		struct bio_vec *bv;
 
-		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+		bio_for_each_segment_all(bv, s->cache_bio, i)
 			__free_page(bv->bv_page);
 	}
 
@@ -1042,14 +1042,15 @@ static void cached_dev_read_done(struct closure *cl)
 	 * to the buffers the original bio pointed to:
 	 */
 
-	if (s->op.cache_bio) {
-		bio_reset(s->op.cache_bio);
-		s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
-		s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
-		bch_bio_map(s->op.cache_bio, NULL);
+	if (s->cache_bio) {
+		bio_reset(s->cache_bio);
+		s->cache_bio->bi_sector =
+			s->cache_miss->bi_sector;
+		s->cache_bio->bi_bdev = s->cache_miss->bi_bdev;
+		s->cache_bio->bi_size = s->cache_bio_sectors << 9;
+		bch_bio_map(s->cache_bio, NULL);
 
-		bio_copy_data(s->cache_miss, s->op.cache_bio);
+		bio_copy_data(s->cache_miss, s->cache_bio);
 
 		bio_put(s->cache_miss);
 		s->cache_miss = NULL;
@@ -1060,8 +1061,8 @@ static void cached_dev_read_done(struct closure *cl)
 
 	bio_complete(s);
 
-	if (s->op.cache_bio &&
-	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
+	if (s->cache_bio &&
+	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
 		s->op.type = BTREE_REPLACE;
 		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
 	}
@@ -1074,12 +1075,12 @@ static void cached_dev_read_done_bh(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
-	bch_mark_cache_accounting(s, !s->cache_miss, s->op.bypass);
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);
+	bch_mark_cache_accounting(s, !s->cache_miss, s->bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->bypass);
 
 	if (s->error)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
-	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
+	else if (s->cache_bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 	else
 		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
@@ -1093,7 +1094,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
-	if (s->cache_miss || s->op.bypass) {
+	if (s->cache_miss || s->bypass) {
 		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
@@ -1101,13 +1102,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	if (!(bio->bi_rw & REQ_RAHEAD) &&
 	    !(bio->bi_rw & REQ_META) &&
-	    s->op.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+	    s->c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
 			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
 
 	s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
-	s->op.replace = KEY(s->op.inode, bio->bi_sector +
+	s->op.replace = KEY(s->inode, bio->bi_sector +
 			    s->cache_bio_sectors, s->cache_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
@@ -1137,7 +1138,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_put;
 
 	s->cache_miss	= miss;
-	s->op.cache_bio = cache_bio;
+	s->cache_bio	= cache_bio;
 	bio_get(cache_bio);
 	closure_bio_submit(cache_bio, &s->cl, s->d);
 
@@ -1177,7 +1178,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
-	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+	bch_keybuf_check_overlapping(&s->c->moving_gc_keys, &start, &end);
 
 	down_read_non_owner(&dc->writeback_lock);
 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
@@ -1185,7 +1186,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		 * We overlap with some dirty data undergoing background
 		 * writeback, force this write to writeback
 		 */
-		s->op.bypass = false;
+		s->bypass = false;
 		s->writeback = true;
 	}
 
@@ -1197,27 +1198,27 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	 * so we still want to call it.
 	 */
 	if (bio->bi_rw & REQ_DISCARD)
-		s->op.bypass = true;
+		s->bypass = true;
 
 	if (should_writeback(dc, s->orig_bio,
 			     cache_mode(dc, bio),
-			     s->op.bypass)) {
-		s->op.bypass = false;
+			     s->bypass)) {
+		s->bypass = false;
 		s->writeback = true;
 	}
 
-	trace_bcache_write(s->orig_bio, s->writeback, s->op.bypass);
+	trace_bcache_write(s->orig_bio, s->writeback, s->bypass);
 
-	if (s->op.bypass) {
-		s->op.cache_bio = s->orig_bio;
-		bio_get(s->op.cache_bio);
+	if (s->bypass) {
+		s->cache_bio = s->orig_bio;
+		bio_get(s->cache_bio);
 
 		if (!(bio->bi_rw & REQ_DISCARD) ||
 		    blk_queue_discard(bdev_get_queue(dc->bdev)))
 			closure_bio_submit(bio, cl, s->d);
 	} else if (s->writeback) {
 		bch_writeback_add(dc);
-		s->op.cache_bio = bio;
+		s->cache_bio = bio;
 
 		if (bio->bi_rw & REQ_FLUSH) {
 			/* Also need to send a flush to the backing device */
@@ -1232,8 +1233,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			closure_bio_submit(flush, cl, s->d);
 		}
 	} else {
-		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+		s->cache_bio = bio_clone_bioset(bio, GFP_NOIO,
 						dc->disk.bio_split);
 
 		closure_bio_submit(bio, cl, s->d);
 	}
@@ -1247,8 +1248,8 @@ static void cached_dev_nodata(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
+	if (s->flush_journal)
+		bch_journal_meta(s->c, cl);
 
 	/* If it's a flush, we send the flush to the backing device too */
 	closure_bio_submit(bio, cl, s->d);
@@ -1286,7 +1287,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 					      cached_dev_nodata,
 					      bcache_wq);
 	} else {
-		s->op.bypass = check_should_bypass(dc, s);
+		s->bypass = check_should_bypass(dc, s);
 
 		if (rw)
 			cached_dev_write(dc, s);
@@ -1376,8 +1377,8 @@ static void flash_dev_nodata(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
+	if (s->flush_journal)
+		bch_journal_meta(s->c, cl);
 
 	continue_at(cl, search_free, NULL);
 }
@@ -1409,13 +1410,13 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 					      flash_dev_nodata,
 					      bcache_wq);
 	} else if (rw) {
-		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+		bch_keybuf_check_overlapping(&s->c->moving_gc_keys,
 					     &KEY(d->id, bio->bi_sector, 0),
 					     &KEY(d->id, bio_end_sector(bio), 0));
 
-		s->op.bypass	= (bio->bi_rw & REQ_DISCARD) != 0;
+		s->bypass	= (bio->bi_rw & REQ_DISCARD) != 0;
 		s->writeback	= true;
-		s->op.cache_bio	= bio;
+		s->cache_bio	= bio;
 
 		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
 	} else {
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index b0b4b0b5b7e9..0f79177c4f33 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -8,19 +8,33 @@ struct search {
 	struct closure		cl;
 
 	struct bcache_device	*d;
+	struct cache_set	*c;
 	struct task_struct	*task;
 
 	struct bbio		bio;
 	struct bio		*orig_bio;
 	struct bio		*cache_miss;
+
+	/* Bio to be inserted into the cache */
+	struct bio		*cache_bio;
 	unsigned		cache_bio_sectors;
 
+	unsigned		inode;
+
 	unsigned		recoverable:1;
 	unsigned		unaligned_bvec:1;
 
 	unsigned		write:1;
 	unsigned		writeback:1;
 
+	unsigned		csum:1;
+	unsigned		bypass:1;
+	unsigned		flush_journal:1;
+
+	unsigned		insert_data_done:1;
+
+	uint16_t		write_prio;
+
 	/* IO error returned to s->bio */
 	short			error;
 	unsigned long		start_time;
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index b8730e714d69..ea77263cf7ef 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -200,7 +200,7 @@ void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	mark_cache_stats(&dc->accounting.collector, hit, bypass);
-	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+	mark_cache_stats(&s->c->accounting.collector, hit, bypass);
 #ifdef CONFIG_CGROUP_BCACHE
 	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
 #endif
@@ -210,21 +210,21 @@ void bch_mark_cache_readahead(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_readaheads);
-	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+	atomic_inc(&s->c->accounting.collector.cache_readaheads);
 }
 
 void bch_mark_cache_miss_collision(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
-	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+	atomic_inc(&s->c->accounting.collector.cache_miss_collisions);
 }
 
 void bch_mark_sectors_bypassed(struct search *s, int sectors)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
-	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+	atomic_add(sectors, &s->c->accounting.collector.sectors_bypassed);
 }
 
 void bch_cache_accounting_init(struct cache_accounting *acc,
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b79dd5a6679e..a314c771263f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1493,11 +1493,10 @@ static void run_cache_set(struct cache_set *c)
 	const char *err = "cannot allocate memory";
 	struct cached_dev *dc, *t;
 	struct cache *ca;
+	struct closure cl;
 	unsigned i;
 
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	closure_init_stack(&cl);
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
@@ -1508,7 +1507,7 @@ static void run_cache_set(struct cache_set *c)
 		struct jset *j;
 
 		err = "cannot allocate memory for journal";
-		if (bch_journal_read(c, &journal, &op))
+		if (bch_journal_read(c, &journal))
 			goto err;
 
 		pr_debug("btree_journal_read() done");
@@ -1543,12 +1542,12 @@ static void run_cache_set(struct cache_set *c)
 		list_del_init(&c->root->list);
 		rw_unlock(true, c->root);
 
-		err = uuid_read(c, j, &op.cl);
+		err = uuid_read(c, j, &cl);
 		if (err)
 			goto err;
 
 		err = "error in recovery";
-		if (bch_btree_check(c, &op))
+		if (bch_btree_check(c))
 			goto err;
 
 		bch_journal_mark(c, &journal);
@@ -1580,7 +1579,7 @@ static void run_cache_set(struct cache_set *c)
 		if (j->version < BCACHE_JSET_VERSION_UUID)
 			__uuid_write(c);
 
-		bch_journal_replay(c, &journal, &op);
+		bch_journal_replay(c, &journal);
 	} else {
 		pr_notice("invalidating existing data");
 
@@ -1616,7 +1615,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		bkey_copy_key(&c->root->key, &MAX_KEY);
-		bch_btree_node_write(c->root, &op.cl);
+		bch_btree_node_write(c->root, &cl);
 
 		bch_btree_set_root(c->root);
 		rw_unlock(true, c->root);
@@ -1629,14 +1628,14 @@ static void run_cache_set(struct cache_set *c)
 		SET_CACHE_SYNC(&c->sb, true);
 
 		bch_journal_next(&c->journal);
-		bch_journal_meta(c, &op.cl);
+		bch_journal_meta(c, &cl);
 	}
 
 	err = "error starting gc thread";
 	if (bch_gc_thread_start(c))
 		goto err;
 
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	c->sb.last_mount = get_seconds();
 	bcache_write_super(c);
 
@@ -1647,7 +1646,7 @@ static void run_cache_set(struct cache_set *c)
 
 	return;
 err:
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	/* XXX: test this, it's broken */
 	bch_cache_set_error(c, err);
 }
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c68de9f12618..b58c2bc91e3f 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -155,7 +155,7 @@ static void write_dirty_finish(struct closure *cl)
 	for (i = 0; i < KEY_PTRS(&w->key); i++)
 		atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-	bch_btree_insert(&op, dc->disk.c, &keys);
+	bch_btree_insert(&op, dc->disk.c, &keys, NULL);
 	closure_sync(&op.cl);
 
 	if (op.insert_collision)
@@ -433,9 +433,16 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 
-static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
+struct sectors_dirty_init {
+	struct btree_op	op;
+	unsigned	inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
 				 struct bkey *k)
 {
+	struct sectors_dirty_init *op = container_of(_op,
+					struct sectors_dirty_init, op);
 	if (KEY_INODE(k) > op->inode)
 		return MAP_DONE;
 
@@ -448,12 +455,12 @@ static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
 
 void bch_sectors_dirty_init(struct cached_dev *dc)
 {
-	struct btree_op op;
+	struct sectors_dirty_init op;
 
-	bch_btree_op_init_stack(&op);
+	bch_btree_op_init_stack(&op.op);
 	op.inode = dc->disk.id;
 
-	bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
+	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
 			   sectors_dirty_init_fn, 0);
 }
 