diff options
author: Kent Overstreet <kmo@daterainc.com> (2013-11-23 21:21:01 -0500)
committer: Kent Overstreet <kmo@daterainc.com> (2013-11-24 01:33:57 -0500)
commit: 20d0189b1012a37d2533a87fb451f7852f2418d1 (patch)
tree: 5ceaa6cfc0e1f1cec423c6c9f5de72d49f2d63a1 /drivers/block/nvme-core.c
parent: ee67891bf132612feb7b999ee1f3350b40867cb4 (diff)
block: Introduce new bio_split()
The new bio_split() can split arbitrary bios - it's not restricted to
single page bios, like the old bio_split() (previously renamed to
bio_pair_split()). It also has different semantics - it doesn't allocate
a struct bio_pair, leaving it up to the caller to handle completions.
Then convert the existing bio_pair_split() users to the new bio_split()
- and also nvme, which was open coding bio splitting.
(We have to take that BUG_ON() out of bio_integrity_trim() because this
bio_split() needs to use it, and there's no reason it has to be used on
bios marked as cloned; BIO_CLONED doesn't seem to have clearly
documented semantics anyways.)
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Neil Brown <neilb@suse.de>
Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r-- | drivers/block/nvme-core.c | 106 |
1 file changed, 9 insertions(+), 97 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 5539d2920872..1f14ac403945 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, | |||
441 | return total_len; | 441 | return total_len; |
442 | } | 442 | } |
443 | 443 | ||
444 | struct nvme_bio_pair { | ||
445 | struct bio b1, b2, *parent; | ||
446 | struct bio_vec *bv1, *bv2; | ||
447 | int err; | ||
448 | atomic_t cnt; | ||
449 | }; | ||
450 | |||
451 | static void nvme_bio_pair_endio(struct bio *bio, int err) | ||
452 | { | ||
453 | struct nvme_bio_pair *bp = bio->bi_private; | ||
454 | |||
455 | if (err) | ||
456 | bp->err = err; | ||
457 | |||
458 | if (atomic_dec_and_test(&bp->cnt)) { | ||
459 | bio_endio(bp->parent, bp->err); | ||
460 | kfree(bp->bv1); | ||
461 | kfree(bp->bv2); | ||
462 | kfree(bp); | ||
463 | } | ||
464 | } | ||
465 | |||
466 | static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, | ||
467 | int len, int offset) | ||
468 | { | ||
469 | struct nvme_bio_pair *bp; | ||
470 | |||
471 | BUG_ON(len > bio->bi_iter.bi_size); | ||
472 | BUG_ON(idx > bio->bi_vcnt); | ||
473 | |||
474 | bp = kmalloc(sizeof(*bp), GFP_ATOMIC); | ||
475 | if (!bp) | ||
476 | return NULL; | ||
477 | bp->err = 0; | ||
478 | |||
479 | bp->b1 = *bio; | ||
480 | bp->b2 = *bio; | ||
481 | |||
482 | bp->b1.bi_iter.bi_size = len; | ||
483 | bp->b2.bi_iter.bi_size -= len; | ||
484 | bp->b1.bi_vcnt = idx; | ||
485 | bp->b2.bi_iter.bi_idx = idx; | ||
486 | bp->b2.bi_iter.bi_sector += len >> 9; | ||
487 | |||
488 | if (offset) { | ||
489 | bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), | ||
490 | GFP_ATOMIC); | ||
491 | if (!bp->bv1) | ||
492 | goto split_fail_1; | ||
493 | |||
494 | bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), | ||
495 | GFP_ATOMIC); | ||
496 | if (!bp->bv2) | ||
497 | goto split_fail_2; | ||
498 | |||
499 | memcpy(bp->bv1, bio->bi_io_vec, | ||
500 | bio->bi_max_vecs * sizeof(struct bio_vec)); | ||
501 | memcpy(bp->bv2, bio->bi_io_vec, | ||
502 | bio->bi_max_vecs * sizeof(struct bio_vec)); | ||
503 | |||
504 | bp->b1.bi_io_vec = bp->bv1; | ||
505 | bp->b2.bi_io_vec = bp->bv2; | ||
506 | bp->b2.bi_io_vec[idx].bv_offset += offset; | ||
507 | bp->b2.bi_io_vec[idx].bv_len -= offset; | ||
508 | bp->b1.bi_io_vec[idx].bv_len = offset; | ||
509 | bp->b1.bi_vcnt++; | ||
510 | } else | ||
511 | bp->bv1 = bp->bv2 = NULL; | ||
512 | |||
513 | bp->b1.bi_private = bp; | ||
514 | bp->b2.bi_private = bp; | ||
515 | |||
516 | bp->b1.bi_end_io = nvme_bio_pair_endio; | ||
517 | bp->b2.bi_end_io = nvme_bio_pair_endio; | ||
518 | |||
519 | bp->parent = bio; | ||
520 | atomic_set(&bp->cnt, 2); | ||
521 | |||
522 | return bp; | ||
523 | |||
524 | split_fail_2: | ||
525 | kfree(bp->bv1); | ||
526 | split_fail_1: | ||
527 | kfree(bp); | ||
528 | return NULL; | ||
529 | } | ||
530 | |||
531 | static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, | 444 | static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, |
532 | int idx, int len, int offset) | 445 | int len) |
533 | { | 446 | { |
534 | struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); | 447 | struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL); |
535 | if (!bp) | 448 | if (!split) |
536 | return -ENOMEM; | 449 | return -ENOMEM; |
537 | 450 | ||
451 | bio_chain(split, bio); | ||
452 | |||
538 | if (bio_list_empty(&nvmeq->sq_cong)) | 453 | if (bio_list_empty(&nvmeq->sq_cong)) |
539 | add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); | 454 | add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); |
540 | bio_list_add(&nvmeq->sq_cong, &bp->b1); | 455 | bio_list_add(&nvmeq->sq_cong, split); |
541 | bio_list_add(&nvmeq->sq_cong, &bp->b2); | 456 | bio_list_add(&nvmeq->sq_cong, bio); |
542 | 457 | ||
543 | return 0; | 458 | return 0; |
544 | } | 459 | } |
@@ -568,8 +483,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, | |||
568 | } else { | 483 | } else { |
569 | if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec)) | 484 | if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec)) |
570 | return nvme_split_and_submit(bio, nvmeq, | 485 | return nvme_split_and_submit(bio, nvmeq, |
571 | iter.bi_idx, | 486 | length); |
572 | length, 0); | ||
573 | 487 | ||
574 | sg = sg ? sg + 1 : iod->sg; | 488 | sg = sg ? sg + 1 : iod->sg; |
575 | sg_set_page(sg, bvec.bv_page, | 489 | sg_set_page(sg, bvec.bv_page, |
@@ -578,9 +492,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, | |||
578 | } | 492 | } |
579 | 493 | ||
580 | if (split_len - length < bvec.bv_len) | 494 | if (split_len - length < bvec.bv_len) |
581 | return nvme_split_and_submit(bio, nvmeq, iter.bi_idx, | 495 | return nvme_split_and_submit(bio, nvmeq, split_len); |
582 | split_len, | ||
583 | split_len - length); | ||
584 | length += bvec.bv_len; | 496 | length += bvec.bv_len; |
585 | bvprv = bvec; | 497 | bvprv = bvec; |
586 | first = 0; | 498 | first = 0; |