Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c	204
1 file changed, 101 insertions, 103 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fbcc851ed5a5..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -163,7 +163,6 @@ static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
 static void bcachecg_destroy(struct cgroup *cgroup)
 {
 	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
-	free_css_id(&bcache_subsys, &cg->css);
 	kfree(cg);
 }
 
@@ -198,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	uint64_t csum = 0;
-	int i;
 
-	bio_for_each_segment(bv, bio, i) {
-		void *d = kmap(bv->bv_page) + bv->bv_offset;
-		csum = bch_crc64_update(csum, d, bv->bv_len);
-		kunmap(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *d = kmap(bv.bv_page) + bv.bv_offset;
+		csum = bch_crc64_update(csum, d, bv.bv_len);
+		kunmap(bv.bv_page);
 	}
 
 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -255,26 +254,44 @@ static void bch_data_insert_keys(struct closure *cl)
 	closure_return(cl);
 }
 
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+			       struct cache_set *c)
+{
+	size_t oldsize = bch_keylist_nkeys(l);
+	size_t newsize = oldsize + u64s;
+
+	/*
+	 * The journalling code doesn't handle the case where the keys to insert
+	 * is bigger than an empty write: If we just return -ENOMEM here,
+	 * bio_insert() and bio_invalidate() will insert the keys created so far
+	 * and finish the rest when the keylist is empty.
+	 */
+	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+		return -ENOMEM;
+
+	return __bch_keylist_realloc(l, u64s);
+}
+
 static void bch_data_invalidate(struct closure *cl)
 {
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio;
 
 	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
+		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
 		unsigned sectors = min(bio_sectors(bio),
 				       1U << (KEY_SIZE_BITS - 1));
 
-		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 			goto out;
 
-		bio->bi_sector += sectors;
-		bio->bi_size -= sectors << 9;
+		bio->bi_iter.bi_sector += sectors;
+		bio->bi_iter.bi_size -= sectors << 9;
 
 		bch_keylist_add(&op->insert_keys,
-				&KEY(op->inode, bio->bi_sector, sectors));
+				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 	}
 
 	op->insert_data_done = true;
@@ -336,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (op->bypass)
-		return bch_data_invalidate(cl);
-
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
 		wake_up_gc(op->c);
 	}
 
+	if (op->bypass)
+		return bch_data_invalidate(cl);
+
 	/*
 	 * Journal writes are marked REQ_FLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
@@ -357,21 +374,21 @@ static void bch_data_insert_start(struct closure *cl)
 
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&op->insert_keys,
-					1 + (op->csum ? 1 : 0),
+					3 + (op->csum ? 1 : 0),
 					op->c))
 			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
 		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
-		SET_KEY_OFFSET(k, bio->bi_sector);
+		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 				       op->write_point, op->write_prio,
 				       op->writeback))
 			goto err;
 
-		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
 		n->bi_end_io = bch_data_insert_endio;
 		n->bi_private = cl;
@@ -522,7 +539,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	     (bio->bi_rw & REQ_WRITE)))
 		goto skip;
 
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 		pr_debug("skipping unaligned io");
 		goto skip;
@@ -546,8 +563,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	spin_lock(&dc->io_lock);
 
-	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-		if (i->last == bio->bi_sector &&
+	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+		if (i->last == bio->bi_iter.bi_sector &&
 		    time_before(jiffies, i->jiffies))
 			goto found;
 
@@ -556,8 +573,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	add_sequential(task);
 	i->sequential = 0;
 found:
-	if (i->sequential + bio->bi_size > i->sequential)
-		i->sequential += bio->bi_size;
+	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+		i->sequential += bio->bi_iter.bi_size;
 
 	i->last = bio_end_sector(bio);
 	i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -597,16 +614,13 @@ struct search {
 	/* Stack frame for bio_complete */
 	struct closure cl;
 
-	struct bcache_device *d;
-
 	struct bbio bio;
 	struct bio *orig_bio;
 	struct bio *cache_miss;
+	struct bcache_device *d;
 
 	unsigned insert_bio_sectors;
-
 	unsigned recoverable:1;
-	unsigned unaligned_bvec:1;
 	unsigned write:1;
 	unsigned read_dirty_data:1;
 
@@ -631,7 +645,8 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 
 	if (error)
 		s->iop.error = error;
-	else if (ptr_stale(s->iop.c, &b->key, 0)) {
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
 		s->iop.error = -EINTR;
 	}
@@ -650,15 +665,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
 	if (KEY_INODE(k) != s->iop.inode ||
-	    KEY_START(k) > bio->bi_sector) {
+	    KEY_START(k) > bio->bi_iter.bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
 		unsigned sectors = KEY_INODE(k) == s->iop.inode
 			? min_t(uint64_t, INT_MAX,
-				KEY_START(k) - bio->bi_sector)
+				KEY_START(k) - bio->bi_iter.bi_sector)
 			: INT_MAX;
 
 		int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -680,14 +695,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	if (KEY_DIRTY(k))
 		s->read_dirty_data = true;
 
-	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-			  KEY_OFFSET(k) - bio->bi_sector),
+	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+			   KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 			  GFP_NOIO, s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io = bch_cache_read_endio;
@@ -712,10 +727,13 @@ static void cache_lookup(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, iop.cl);
 	struct bio *bio = &s->bio.bio;
+	int ret;
+
+	bch_btree_op_init(&s->op, -1);
 
-	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_sector, 0),
+	ret = bch_btree_map_keys(&s->op, s->iop.c,
+				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 				 cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
 
@@ -756,13 +774,15 @@ static void bio_complete(struct search *s)
 	}
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
-	memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+	bio_init(bio);
+	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io = request_endio;
 	bio->bi_private = &s->cl;
+
 	atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -774,43 +794,36 @@ static void search_free(struct closure *cl)
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
-	if (s->unaligned_bvec)
-		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+					  struct bcache_device *d)
 {
 	struct search *s;
-	struct bio_vec *bv;
 
 	s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, iop.insert_keys));
 
-	__closure_init(&s->cl, NULL);
+	closure_init(&s->cl, NULL);
+	do_bio_hook(s, bio);
 
-	s->iop.inode = d->id;
-	s->iop.c = d->c;
-	s->d = d;
-	s->op.lock = -1;
-	s->iop.write_point = hash_long((unsigned long) current, 16);
 	s->orig_bio = bio;
-	s->write = (bio->bi_rw & REQ_WRITE) != 0;
-	s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->cache_miss = NULL;
+	s->d = d;
 	s->recoverable = 1;
+	s->write = (bio->bi_rw & REQ_WRITE) != 0;
+	s->read_dirty_data = 0;
 	s->start_time = jiffies;
-	do_bio_hook(s);
 
-	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-		memcpy(bv, bio_iovec(bio),
-		       sizeof(struct bio_vec) * bio_segments(bio));
-
-		s->bio.bio.bi_io_vec = bv;
-		s->unaligned_bvec = 1;
-	}
+	s->iop.c = d->c;
+	s->iop.bio = NULL;
+	s->iop.inode = d->id;
+	s->iop.write_point = hash_long((unsigned long) current, 16);
+	s->iop.write_prio = 0;
+	s->iop.error = 0;
+	s->iop.flags = 0;
+	s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 
 	return s;
 }
@@ -850,26 +863,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
-	struct bio_vec *bv;
-	int i;
 
 	if (s->recoverable) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
 		s->iop.error = 0;
-		bv = s->bio.bio.bi_io_vec;
-		do_bio_hook(s);
-		s->bio.bio.bi_io_vec = bv;
-
-		if (!s->unaligned_bvec)
-			bio_for_each_segment(bv, s->orig_bio, i)
-				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-		else
-			memcpy(s->bio.bio.bi_io_vec,
-			       bio_iovec(s->orig_bio),
-			       sizeof(struct bio_vec) *
-			       bio_segments(s->orig_bio));
+		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
 
@@ -894,9 +894,9 @@ static void cached_dev_read_done(struct closure *cl)
 
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
-		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
 		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
 		bio_copy_data(s->cache_miss, s->iop.bio);
@@ -905,8 +905,7 @@ static void cached_dev_read_done(struct closure *cl)
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable &&
-	    !s->unaligned_bvec && !s->read_dirty_data)
+	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
 		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
@@ -946,7 +945,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->iop.bypass) {
-		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
@@ -960,7 +959,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
 	s->iop.replace_key = KEY(s->iop.inode,
-				 bio->bi_sector + s->insert_bio_sectors,
+				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 				 s->insert_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -969,7 +968,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	s->iop.replace = true;
 
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = miss == bio ? MAP_DONE : -EINTR;
@@ -980,9 +979,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	if (!cache_bio)
 		goto out_submit;
 
-	cache_bio->bi_sector = miss->bi_sector;
+	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
 	cache_bio->bi_bdev = miss->bi_bdev;
-	cache_bio->bi_size = s->insert_bio_sectors << 9;
+	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io = request_endio;
 	cache_bio->bi_private = &s->cl;
@@ -1032,7 +1031,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1088,8 +1087,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			closure_bio_submit(flush, cl, s->d);
 		}
 	} else {
-		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-					      dc->disk.bio_split);
+		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
 		closure_bio_submit(bio, cl, s->d);
 	}
@@ -1127,13 +1125,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	bio->bi_bdev = dc->bdev;
-	bio->bi_sector += dc->sb.data_offset;
+	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
 		trace_bcache_request_start(s->d, bio);
 
-		if (!bio->bi_size) {
+		if (!bio->bi_iter.bi_size) {
 			/*
 			 * can't call bch_journal_meta from under
 			 * generic_make_request
@@ -1205,24 +1203,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
-	struct bio_vec *bv;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
 	/* Zero fill bio */
 
-	bio_for_each_segment(bv, bio, i) {
-		unsigned j = min(bv->bv_len >> 9, sectors);
+	bio_for_each_segment(bv, bio, iter) {
+		unsigned j = min(bv.bv_len >> 9, sectors);
 
-		void *p = kmap(bv->bv_page);
-		memset(p + bv->bv_offset, 0, j << 9);
-		kunmap(bv->bv_page);
+		void *p = kmap(bv.bv_page);
+		memset(p + bv.bv_offset, 0, j << 9);
+		kunmap(bv.bv_page);
 
 		sectors -= j;
 	}
 
-	bio_advance(bio, min(sectors << 9, bio->bi_size));
+	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		return MAP_DONE;
 
 	return MAP_CONTINUE;
@@ -1256,7 +1254,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_bcache_request_start(s->d, bio);
 
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		/*
 		 * can't call bch_journal_meta from under
 		 * generic_make_request
@@ -1266,7 +1264,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 				bcache_wq);
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-					&KEY(d->id, bio->bi_sector, 0),
+					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
 		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;