Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c  131
1 file changed, 52 insertions, 79 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc21d2a0..c906571997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	uint64_t csum = 0;
-	int i;
 
-	bio_for_each_segment(bv, bio, i) {
-		void *d = kmap(bv->bv_page) + bv->bv_offset;
-		csum = bch_crc64_update(csum, d, bv->bv_len);
-		kunmap(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *d = kmap(bv.bv_page) + bv.bv_offset;
+		csum = bch_crc64_update(csum, d, bv.bv_len);
+		kunmap(bv.bv_page);
 	}
 
 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
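The hunk above shows the conversion pattern repeated throughout this file: bio_for_each_segment() now yields a struct bio_vec by value and tracks its position in a struct bvec_iter, while the bio's own position and size move under bio->bi_iter. A minimal sketch of the new iteration idiom, kept separate from the patch itself (the helper name sum_segment_lengths is illustrative, not part of bcache):

	#include <linux/bio.h>

	/*
	 * Walk a bio segment by segment with the bvec_iter API and add up
	 * the per-segment lengths; bv is filled in by value on each pass,
	 * and iter carries the cursor instead of an integer index.
	 */
	static u64 sum_segment_lengths(struct bio *bio)
	{
		struct bio_vec bv;
		struct bvec_iter iter;
		u64 bytes = 0;

		bio_for_each_segment(bv, bio, iter)
			bytes += bv.bv_len;

		return bytes;
	}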
@@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl)
 	struct bio *bio = op->bio;
 
 	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
+		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
 		unsigned sectors = min(bio_sectors(bio),
@@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl)
 		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
 			goto out;
 
-		bio->bi_sector += sectors;
-		bio->bi_size -= sectors << 9;
+		bio->bi_iter.bi_sector += sectors;
+		bio->bi_iter.bi_size -= sectors << 9;
 
 		bch_keylist_add(&op->insert_keys,
-				&KEY(op->inode, bio->bi_sector, sectors));
+				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 	}
 
 	op->insert_data_done = true;
@@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl)
 		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
-		SET_KEY_OFFSET(k, bio->bi_sector);
+		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 				       op->write_point, op->write_prio,
 				       op->writeback))
 			goto err;
 
-		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
 		n->bi_end_io = bch_data_insert_endio;
 		n->bi_private = cl;
@@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	    (bio->bi_rw & REQ_WRITE)))
 		goto skip;
 
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 		pr_debug("skipping unaligned io");
 		goto skip;
@@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	spin_lock(&dc->io_lock);
 
-	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-		if (i->last == bio->bi_sector &&
+	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+		if (i->last == bio->bi_iter.bi_sector &&
 		    time_before(jiffies, i->jiffies))
 			goto found;
 
@@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	add_sequential(task);
 	i->sequential = 0;
 found:
-	if (i->sequential + bio->bi_size > i->sequential)
-		i->sequential += bio->bi_size;
+	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+		i->sequential += bio->bi_iter.bi_size;
 
 	i->last = bio_end_sector(bio);
 	i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -605,7 +605,6 @@ struct search {
 	unsigned insert_bio_sectors;
 
 	unsigned recoverable:1;
-	unsigned unaligned_bvec:1;
 	unsigned write:1;
 	unsigned read_dirty_data:1;
 
@@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
 	if (KEY_INODE(k) != s->iop.inode ||
-	    KEY_START(k) > bio->bi_sector) {
+	    KEY_START(k) > bio->bi_iter.bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
 		unsigned sectors = KEY_INODE(k) == s->iop.inode
 			? min_t(uint64_t, INT_MAX,
-				KEY_START(k) - bio->bi_sector)
+				KEY_START(k) - bio->bi_iter.bi_sector)
 			: INT_MAX;
 
 		int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	if (KEY_DIRTY(k))
 		s->read_dirty_data = true;
 
-	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-			  KEY_OFFSET(k) - bio->bi_sector),
+	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+			   KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 			  GFP_NOIO, s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io = bch_cache_read_endio;
@@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl)
 	struct bio *bio = &s->bio.bio;
 
 	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_sector, 0),
+				     &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
@@ -758,10 +757,12 @@ static void bio_complete(struct search *s)
 static void do_bio_hook(struct search *s)
 {
 	struct bio *bio = &s->bio.bio;
-	memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+	bio_init(bio);
+	__bio_clone_fast(bio, s->orig_bio);
 	bio->bi_end_io = request_endio;
 	bio->bi_private = &s->cl;
+
 	atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -773,9 +774,6 @@ static void search_free(struct closure *cl)
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
-	if (s->unaligned_bvec)
-		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
@@ -783,7 +781,6 @@ static void search_free(struct closure *cl)
 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 {
 	struct search *s;
-	struct bio_vec *bv;
 
 	s = mempool_alloc(d->c->search, GFP_NOIO);
 	memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->start_time = jiffies;
 	do_bio_hook(s);
 
-	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-		memcpy(bv, bio_iovec(bio),
-		       sizeof(struct bio_vec) * bio_segments(bio));
-
-		s->bio.bio.bi_io_vec = bv;
-		s->unaligned_bvec = 1;
-	}
-
 	return s;
 }
 
@@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
-	struct bio_vec *bv;
-	int i;
 
 	if (s->recoverable) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
 		s->iop.error = 0;
-		bv = s->bio.bio.bi_io_vec;
 		do_bio_hook(s);
-		s->bio.bio.bi_io_vec = bv;
-
-		if (!s->unaligned_bvec)
-			bio_for_each_segment(bv, s->orig_bio, i)
-				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-		else
-			memcpy(s->bio.bio.bi_io_vec,
-			       bio_iovec(s->orig_bio),
-			       sizeof(struct bio_vec) *
-			       bio_segments(s->orig_bio));
 
 		/* XXX: invalidate cache */
 
@@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl)
 
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
-		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
 		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
 		bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl)
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable &&
-	    !s->unaligned_bvec && !s->read_dirty_data)
+	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
 		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
@@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->iop.bypass) {
-		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
@@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
 	s->iop.replace_key = KEY(s->iop.inode,
-				 bio->bi_sector + s->insert_bio_sectors,
+				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 				 s->insert_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	s->iop.replace = true;
 
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	if (!cache_bio)
 		goto out_submit;
 
-	cache_bio->bi_sector = miss->bi_sector;
+	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
 	cache_bio->bi_bdev = miss->bi_bdev;
-	cache_bio->bi_size = s->insert_bio_sectors << 9;
+	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io = request_endio;
 	cache_bio->bi_private = &s->cl;
@@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			closure_bio_submit(flush, cl, s->d);
 		}
 	} else {
-		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-					      dc->disk.bio_split);
+		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
 		closure_bio_submit(bio, cl, s->d);
 	}
@@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	bio->bi_bdev = dc->bdev;
-	bio->bi_sector += dc->sb.data_offset;
+	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
 		trace_bcache_request_start(s->d, bio);
 
-		if (!bio->bi_size) {
+		if (!bio->bi_iter.bi_size) {
 			/*
 			 * can't call bch_journal_meta from under
 			 * generic_make_request
@@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
-	struct bio_vec *bv;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
 	/* Zero fill bio */
 
-	bio_for_each_segment(bv, bio, i) {
-		unsigned j = min(bv->bv_len >> 9, sectors);
+	bio_for_each_segment(bv, bio, iter) {
+		unsigned j = min(bv.bv_len >> 9, sectors);
 
-		void *p = kmap(bv->bv_page);
-		memset(p + bv->bv_offset, 0, j << 9);
-		kunmap(bv->bv_page);
+		void *p = kmap(bv.bv_page);
+		memset(p + bv.bv_offset, 0, j << 9);
+		kunmap(bv.bv_page);
 
 		sectors -= j;
 	}
 
-	bio_advance(bio, min(sectors << 9, bio->bi_size));
+	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		return MAP_DONE;
 
 	return MAP_CONTINUE;
@@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_bcache_request_start(s->d, bio);
 
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		/*
 		 * can't call bch_journal_meta from under
 		 * generic_make_request
@@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 			      bcache_wq);
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-					&KEY(d->id, bio->bi_sector, 0),
+					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
 		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;