author     Kent Overstreet <kmo@daterainc.com>  2013-10-11 18:45:43 -0400
committer  Kent Overstreet <kmo@daterainc.com>  2013-11-24 01:33:51 -0500
commit     003b5c5719f159f4f4bf97511c4702a0638313dd
tree       1b3cac74e22ae5a87fdb6e3066f2d728913e6e0c
parent     458b76ed2f9517becb74dcc8eedd70d3068ea6e4
block: Convert drivers to immutable biovecs
Now that we've got a mechanism for immutable biovecs -
bi_iter.bi_bvec_done - we need to convert drivers to use primitives that
respect it instead of using the bvec array directly.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: NeilBrown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
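For readers who have not seen the new primitives, the sketch below illustrates the pattern this series applies throughout the drivers touched here: keep a private struct bvec_iter, pull the current segment with bio_iter_iovec(), and step forward with bio_advance_iter() instead of indexing bi_io_vec by hand. This is an illustrative sketch only, not part of the patch; example_walk_bio() and the commented-out per-segment hook are made-up names, and it assumes the post-conversion <linux/bio.h> interfaces introduced by this series.

/*
 * Illustrative sketch (not from this patch): walking a bio's payload
 * through a private copy of bi_iter rather than indexing bi_io_vec.
 * Because the iterator carries bi_bvec_done, the loop stays correct
 * even when the bio was split partway through a bvec.
 */
#include <linux/bio.h>

static void example_walk_bio(struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;	/* private copy; the bio itself is not advanced */
	struct bio_vec bv;

	while (iter.bi_size) {
		bv = bio_iter_iovec(bio, iter);	/* current page, offset and length */

		/* per-segment work goes here, e.g.
		 * example_handle_segment(bv.bv_page, bv.bv_offset, bv.bv_len);
		 */

		bio_advance_iter(bio, &iter, bv.bv_len);
	}
}

After this series, bio_for_each_segment(bv, bio, iter) is essentially a wrapper around this loop.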
-rw-r--r--  drivers/block/umem.c    | 50
-rw-r--r--  drivers/md/dm-crypt.c   | 49
-rw-r--r--  drivers/md/dm-io.c      | 31
-rw-r--r--  drivers/md/dm-raid1.c   |  8
-rw-r--r--  drivers/md/dm-verity.c  | 52
-rw-r--r--  fs/bio.c                | 14
-rw-r--r--  include/linux/dm-io.h   |  4
7 files changed, 89 insertions(+), 119 deletions(-)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index dab4f1afeae9..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
 	 * have been written
 	 */
 	struct bio *bio, *currentbio, **biotail;
-	int current_idx;
-	sector_t current_sector;
+	struct bvec_iter current_iter;
 
 	struct request_queue *queue;
 
@@ -118,7 +117,7 @@ struct cardinfo {
 		struct mm_dma_desc *desc;
 		int cnt, headcnt;
 		struct bio *bio, **biotail;
-		int idx;
+		struct bvec_iter iter;
 	} mm_pages[2];
 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
 	dma_addr_t dma_handle;
 	int offset;
 	struct bio *bio;
-	struct bio_vec *vec;
-	int idx;
+	struct bio_vec vec;
 	int rw;
-	int len;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
 		card->currentbio = card->bio;
-		card->current_idx = card->bio->bi_iter.bi_idx;
-		card->current_sector = card->bio->bi_iter.bi_sector;
+		card->current_iter = card->bio->bi_iter;
 		card->bio = card->bio->bi_next;
 		if (card->bio == NULL)
 			card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
 	}
 	if (!bio)
 		return 0;
-	idx = card->current_idx;
 
 	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
-	vec = bio_iovec_idx(bio, idx);
-	len = vec->bv_len;
+	vec = bio_iter_iovec(bio, card->current_iter);
+
 	dma_handle = pci_map_page(card->dev,
-				  vec->bv_page,
-				  vec->bv_offset,
-				  len,
+				  vec.bv_page,
+				  vec.bv_offset,
+				  vec.bv_len,
 				  (rw == READ) ?
 				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
 	desc = &p->desc[p->cnt];
 	p->cnt++;
 	if (p->bio == NULL)
-		p->idx = idx;
+		p->iter = card->current_iter;
 	if ((p->biotail) != &bio->bi_next) {
 		*(p->biotail) = bio;
 		p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
 	desc->data_dma_handle = dma_handle;
 
 	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-	desc->local_addr = cpu_to_le64(card->current_sector << 9);
-	desc->transfer_size = cpu_to_le32(len);
+	desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+	desc->transfer_size = cpu_to_le32(vec.bv_len);
 	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
 	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
 	desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 
-	card->current_sector += (len >> 9);
-	idx++;
-	card->current_idx = idx;
-	if (idx >= bio->bi_vcnt)
+
+	bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+	if (!card->current_iter.bi_size)
 		card->currentbio = NULL;
 
 	return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
 		struct mm_dma_desc *desc = &page->desc[page->headcnt];
 		int control = le32_to_cpu(desc->sem_control_bits);
 		int last = 0;
-		int idx;
+		struct bio_vec vec;
 
 		if (!(control & DMASCR_DMA_COMPLETE)) {
 			control = dma_status;
 			last = 1;
 		}
+
 		page->headcnt++;
-		idx = page->idx;
-		page->idx++;
-		if (page->idx >= bio->bi_vcnt) {
+		vec = bio_iter_iovec(bio, page->iter);
+		bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+		if (!page->iter.bi_size) {
 			page->bio = bio->bi_next;
 			if (page->bio)
-				page->idx = page->bio->bi_iter.bi_idx;
+				page->iter = page->bio->bi_iter;
 		}
 
 		pci_unmap_page(card->dev, desc->data_dma_handle,
-			       bio_iovec_idx(bio, idx)->bv_len,
+			       vec.bv_len,
 			       (control & DMASCR_TRANSFER_READ) ?
 			       PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1e2e5465d28e..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
 	struct completion restart;
 	struct bio *bio_in;
 	struct bio *bio_out;
-	unsigned int offset_in;
-	unsigned int offset_out;
-	unsigned int idx_in;
-	unsigned int idx_out;
+	struct bvec_iter iter_in;
+	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
 {
 	ctx->bio_in = bio_in;
 	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0;
+	if (bio_in)
+		ctx->iter_in = bio_in->bi_iter;
+	if (bio_out)
+		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
 {
-	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
 	int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
 	dmreq->iv_sector = ctx->cc_sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
-	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-		    bv_in->bv_offset + ctx->offset_in);
+	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+		    bv_in.bv_offset);
 
 	sg_init_table(&dmreq->sg_out, 1);
-	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-		    bv_out->bv_offset + ctx->offset_out);
+	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+		    bv_out.bv_offset);
 
-	ctx->offset_in += 1 << SECTOR_SHIFT;
-	if (ctx->offset_in >= bv_in->bv_len) {
-		ctx->offset_in = 0;
-		ctx->idx_in++;
-	}
-
-	ctx->offset_out += 1 << SECTOR_SHIFT;
-	if (ctx->offset_out >= bv_out->bv_len) {
-		ctx->offset_out = 0;
-		ctx->idx_out++;
-	}
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
 
 	if (cc->iv_gen_ops) {
 		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 	atomic_set(&ctx->cc_pending, 1);
 
-	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
 		crypt_alloc_req(cc, ctx);
 
@@ -1207,7 +1195,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	}
 
 	/* crypt_convert should have filled the clone bio */
-	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+	BUG_ON(io->ctx.iter_out.bi_size);
 
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
@@ -1246,7 +1234,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		}
 
 		io->ctx.bio_out = clone;
-		io->ctx.idx_out = 0;
+		io->ctx.iter_out = clone->bi_iter;
 
 		remaining -= clone->bi_iter.bi_size;
 		sector += bio_sectors(clone);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
 					   io->base_bio, sector);
-			new_io->ctx.idx_in = io->ctx.idx_in;
-			new_io->ctx.offset_in = io->ctx.offset_in;
+			new_io->ctx.iter_in = io->ctx.iter_in;
 
 			/*
 			 * Fragments after the first use the base_io
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 01558b093307..b2b8a10e8427 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
 		  struct page **p, unsigned long *len, unsigned *offset)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-	*p = bvec->bv_page;
-	*len = bvec->bv_len;
-	*offset = bvec->bv_offset;
+	struct bio *bio = dp->context_ptr;
+	struct bio_vec bvec = bio_iovec(bio);
+	*p = bvec.bv_page;
+	*len = bvec.bv_len;
+	*offset = bvec.bv_offset;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-	dp->context_ptr = bvec + 1;
+	struct bio *bio = dp->context_ptr;
+	struct bio_vec bvec = bio_iovec(bio);
+
+	bio_advance(bio, bvec.bv_len);
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-	dp->get_page = bvec_get_page;
-	dp->next_page = bvec_next_page;
-	dp->context_ptr = bvec;
+	dp->get_page = bio_get_page;
+	dp->next_page = bio_next_page;
+	dp->context_ptr = bio;
 }
 
 /*
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
 		break;
 
-	case DM_IO_BVEC:
-		bvec_dp_init(dp, io_req->mem.ptr.bvec);
+	case DM_IO_BIO:
+		bio_dp_init(dp, io_req->mem.ptr.bio);
 		break;
 
 	case DM_IO_VMA:
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9f6d8e6baa7d..f284e0bfb25f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 	struct dm_io_region io;
 	struct dm_io_request io_req = {
 		.bi_rw = READ,
-		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx,
+		.mem.type = DM_IO_BIO,
+		.mem.ptr.bio = bio,
 		.notify.fn = read_callback,
 		.notify.context = bio,
 		.client = m->ms->io_client,
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
-		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx,
+		.mem.type = DM_IO_BIO,
+		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
 		.notify.context = bio,
 		.client = ms->io_client,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 5392135924ca..ac35e959d49b 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
 	sector_t block;
 	unsigned n_blocks;
 
-	/* saved bio vector */
-	struct bio_vec *io_vec;
-	unsigned io_vec_size;
+	struct bvec_iter iter;
 
 	struct work_struct work;
 
-	/* A space for short vectors; longer vectors are allocated separately. */
-	struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
 	/*
 	 * Three variably-size fields follow this struct:
 	 *
@@ -284,9 +279,10 @@ release_ret_r:
 static int verity_verify_io(struct dm_verity_io *io)
 {
 	struct dm_verity *v = io->v;
+	struct bio *bio = dm_bio_from_per_bio_data(io,
+						   v->ti->per_bio_data_size);
 	unsigned b;
 	int i;
-	unsigned vector = 0, offset = 0;
 
 	for (b = 0; b < io->n_blocks; b++) {
 		struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
 		}
 
 		todo = 1 << v->data_dev_block_bits;
-		do {
-			struct bio_vec *bv;
+		while (io->iter.bi_size) {
 			u8 *page;
-			unsigned len;
+			struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
-			BUG_ON(vector >= io->io_vec_size);
-			bv = &io->io_vec[vector];
-			page = kmap_atomic(bv->bv_page);
-			len = bv->bv_len - offset;
-			if (likely(len >= todo))
-				len = todo;
-			r = crypto_shash_update(desc,
-					page + bv->bv_offset + offset, len);
+			page = kmap_atomic(bv.bv_page);
+			r = crypto_shash_update(desc, page + bv.bv_offset,
+						bv.bv_len);
 			kunmap_atomic(page);
+
 			if (r < 0) {
 				DMERR("crypto_shash_update failed: %d", r);
 				return r;
 			}
-			offset += len;
-			if (likely(offset == bv->bv_len)) {
-				offset = 0;
-				vector++;
-			}
-			todo -= len;
-		} while (todo);
+
+			bio_advance_iter(bio, &io->iter, bv.bv_len);
+		}
 
 		if (!v->version) {
 			r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
 			return -EIO;
 		}
 	}
-	BUG_ON(vector != io->io_vec_size);
-	BUG_ON(offset);
 
 	return 0;
 }
@@ -400,9 +385,6 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
 	bio->bi_end_io = io->orig_bi_end_io;
 	bio->bi_private = io->orig_bi_private;
 
-	if (io->io_vec != io->io_vec_inline)
-		mempool_free(io->io_vec, v->vec_mempool);
-
 	bio_endio(bio, error);
 }
 
@@ -519,13 +501,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_end_io = verity_end_io;
 	bio->bi_private = io;
-	io->io_vec_size = bio_segments(bio);
-	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
-		io->io_vec = io->io_vec_inline;
-	else
-		io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
-	memcpy(io->io_vec, __bio_iovec(bio),
-	       io->io_vec_size * sizeof(struct bio_vec));
+	io->iter = bio->bi_iter;
 
 	verity_submit_prefetch(v, io);
 
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -525,8 +525,17 @@ EXPORT_SYMBOL(bio_phys_segments);
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
-		bio_src->bi_max_vecs * sizeof(struct bio_vec));
+	if (bio_is_rw(bio_src)) {
+		struct bio_vec bv;
+		struct bvec_iter iter;
+
+		bio_for_each_segment(bv, bio_src, iter)
+			bio->bi_io_vec[bio->bi_vcnt++] = bv;
+	} else if (bio_has_data(bio_src)) {
+		memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
+		       bio_src->bi_max_vecs * sizeof(struct bio_vec));
+		bio->bi_vcnt = bio_src->bi_vcnt;
+	}
 
 	/*
 	 * most users will be overriding ->bi_bdev with a new target,
@@ -535,7 +544,6 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
 	bio->bi_rw = bio_src->bi_rw;
-	bio->bi_vcnt = bio_src->bi_vcnt;
 	bio->bi_iter = bio_src->bi_iter;
 }
 EXPORT_SYMBOL(__bio_clone);
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index f4b0aa3126f5..a68cbe59e6ad 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
 	DM_IO_PAGE_LIST,/* Page list */
-	DM_IO_BVEC,	/* Bio vector */
+	DM_IO_BIO,	/* Bio vector */
 	DM_IO_VMA,	/* Virtual memory area */
 	DM_IO_KMEM,	/* Kernel memory */
 };
@@ -41,7 +41,7 @@ struct dm_io_memory {
 
 	union {
 		struct page_list *pl;
-		struct bio_vec *bvec;
+		struct bio *bio;
 		void *vma;
 		void *addr;
 	} ptr;