author    Kent Overstreet <kmo@daterainc.com>  2013-11-22 22:37:48 -0500
committer Kent Overstreet <kmo@daterainc.com>  2013-11-24 01:33:47 -0500
commit    ed9c47bebeeea4a468b07cfd745c690190f8014c (patch)
tree      804f88d900daabb8612ed881b8b7e1ab88872d79 /drivers/md/bcache/request.c
parent    2c30c71bd653afcbed7f6754e8fe3d16e0e708a1 (diff)
bcache: Kill unaligned bvec hack
Bcache has a hack to avoid cloning the biovec if it's all full pages - but with immutable biovecs coming this won't be necessary anymore.

For now, we remove the special case and always clone the bvec array so that the immutable biovec patches are simpler.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
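For reference, this is roughly how do_bio_hook() reads once the patch is applied, reassembled from the hunks below; the explanatory comments are added here and are not part of the commit. It is a sketch against the 2013-era bio API that this tree uses (bio_init(), __bio_clone(), BIO_MAX_PAGES), not an authoritative copy of the file:

static void do_bio_hook(struct search *s)
{
        struct bio *bio = &s->bio.bio;

        /* Reinitialize the bio embedded in the search from scratch,
         * instead of memcpy()ing the original bio over it. */
        bio_init(bio);

        /* Point it at the per-search bvec array now embedded in
         * struct search... */
        bio->bi_io_vec = s->bv;
        bio->bi_max_vecs = BIO_MAX_PAGES;

        /* ...and unconditionally clone the original bio's fields and
         * biovec, rather than special-casing all-full-page bios. */
        __bio_clone(bio, s->orig_bio);

        bio->bi_end_io = request_endio;
        bio->bi_private = &s->cl;

        atomic_set(&bio->bi_cnt, 3);
}

The cost is that every struct search now carries a BIO_MAX_PAGES-sized bvec array; the patch accepts that to keep search_alloc() and the read-retry path trivial until the immutable biovec work lands.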
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c  37
1 file changed, 7 insertions(+), 30 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fbcc851ed5a5..78bab4154e97 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -606,7 +606,6 @@ struct search {
         unsigned insert_bio_sectors;
 
         unsigned recoverable:1;
-        unsigned unaligned_bvec:1;
         unsigned write:1;
         unsigned read_dirty_data:1;
 
@@ -614,6 +613,7 @@ struct search {
 
         struct btree_op op;
         struct data_insert_op iop;
+        struct bio_vec bv[BIO_MAX_PAGES];
 };
 
 static void bch_cache_read_endio(struct bio *bio, int error)
@@ -759,10 +759,14 @@ static void bio_complete(struct search *s)
 static void do_bio_hook(struct search *s)
 {
         struct bio *bio = &s->bio.bio;
-        memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+        bio_init(bio);
+        bio->bi_io_vec = s->bv;
+        bio->bi_max_vecs = BIO_MAX_PAGES;
+        __bio_clone(bio, s->orig_bio);
         bio->bi_end_io = request_endio;
         bio->bi_private = &s->cl;
+
         atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -774,9 +778,6 @@ static void search_free(struct closure *cl)
         if (s->iop.bio)
                 bio_put(s->iop.bio);
 
-        if (s->unaligned_bvec)
-                mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
         closure_debug_destroy(cl);
         mempool_free(s, s->d->c->search);
 }
@@ -784,7 +785,6 @@ static void search_free(struct closure *cl)
 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 {
         struct search *s;
-        struct bio_vec *bv;
 
         s = mempool_alloc(d->c->search, GFP_NOIO);
         memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -803,15 +803,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
         s->start_time = jiffies;
         do_bio_hook(s);
 
-        if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-                bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-                memcpy(bv, bio_iovec(bio),
-                       sizeof(struct bio_vec) * bio_segments(bio));
-
-                s->bio.bio.bi_io_vec = bv;
-                s->unaligned_bvec = 1;
-        }
-
         return s;
 }
 
@@ -850,26 +841,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
         struct bio *bio = &s->bio.bio;
-        struct bio_vec *bv;
-        int i;
 
         if (s->recoverable) {
                 /* Retry from the backing device: */
                 trace_bcache_read_retry(s->orig_bio);
 
                 s->iop.error = 0;
-                bv = s->bio.bio.bi_io_vec;
                 do_bio_hook(s);
-                s->bio.bio.bi_io_vec = bv;
-
-                if (!s->unaligned_bvec)
-                        bio_for_each_segment(bv, s->orig_bio, i)
-                                bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-                else
-                        memcpy(s->bio.bio.bi_io_vec,
-                               bio_iovec(s->orig_bio),
-                               sizeof(struct bio_vec) *
-                               bio_segments(s->orig_bio));
 
                 /* XXX: invalidate cache */
 
@@ -905,8 +883,7 @@ static void cached_dev_read_done(struct closure *cl)
                 s->cache_miss = NULL;
         }
 
-        if (verify(dc, &s->bio.bio) && s->recoverable &&
-            !s->unaligned_bvec && !s->read_dirty_data)
+        if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                 bch_data_verify(dc, s->orig_bio);
 
         bio_complete(s);