author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-04-11 06:56:49 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-04-21 03:50:08 -0400
commit	c5dec1c3034f1ae3503efbf641ff3b0273b64797 (patch)
tree	d8c2d81ff2d944a0ba43e6ef7284482898af8b3b
parent	476a4813cfddf7cf159956cc0e2d3c830c1507e3 (diff)
block: convert bio_copy_user to bio_copy_user_iov
This patch adds a struct sg_iovec variant of bio_copy_user (named
bio_copy_user_iov). bio_copy_user now uses bio_copy_user_iov
internally, just as bio_map_user uses bio_map_user_iov.

The major changes are:

- add an sg_iovec array to struct bio_map_data

- add __bio_copy_iov, which copies data between a bio and an
  sg_iovec; bio_copy_user_iov and bio_uncopy_user use it (see the
  caller sketch after this list)
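For illustration only, a minimal sketch of a caller of the new
interface; this is not part of the patch. q, buf0/buf1 and len0/len1
are hypothetical placeholders for a real request queue and user
buffers; the sg_iovec fields, the write_to_vm flag and the ERR_PTR
return convention are taken from the code below.

/*
 * Hypothetical caller sketch (not in this patch): build a two-element
 * sg_iovec and hand it to the new bio_copy_user_iov().
 */
struct sg_iovec iov[2];
struct bio *bio;

iov[0].iov_base = buf0;
iov[0].iov_len = len0;
iov[1].iov_base = buf1;
iov[1].iov_len = len1;

/* write_to_vm == 0: a device write, so user data is bounced into
 * kernel pages before the bio is returned */
bio = bio_copy_user_iov(q, iov, 2, 0);
if (IS_ERR(bio))
	return PTR_ERR(bio);

/* submit the bio; pair with bio_uncopy_user() on completion */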
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
 fs/bio.c            | 158 ++++++++++++++++++++++++++++++++-----------
 include/linux/bio.h |   2 ++
 2 files changed, 119 insertions(+), 41 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -444,22 +444,27 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 
 struct bio_map_data {
 	struct bio_vec *iovecs;
-	void __user *userptr;
+	int nr_sgvecs;
+	struct sg_iovec *sgvecs;
 };
 
-static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
+			     struct sg_iovec *iov, int iov_count)
 {
 	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
+	bmd->nr_sgvecs = iov_count;
 	bio->bi_private = bmd;
 }
 
 static void bio_free_map_data(struct bio_map_data *bmd)
 {
 	kfree(bmd->iovecs);
+	kfree(bmd->sgvecs);
 	kfree(bmd);
 }
 
-static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
 {
 	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
 
@@ -467,13 +472,71 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs)
 		return NULL;
 
 	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
-	if (bmd->iovecs)
+	if (!bmd->iovecs) {
+		kfree(bmd);
+		return NULL;
+	}
+
+	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
+	if (bmd->sgvecs)
 		return bmd;
 
+	kfree(bmd->iovecs);
 	kfree(bmd);
 	return NULL;
 }
 
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
+			  int uncopy)
+{
+	int ret = 0, i;
+	struct bio_vec *bvec;
+	int iov_idx = 0;
+	unsigned int iov_off = 0;
+	int read = bio_data_dir(bio) == READ;
+
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		char *bv_addr = page_address(bvec->bv_page);
+		unsigned int bv_len = bvec->bv_len;
+
+		while (bv_len && iov_idx < iov_count) {
+			unsigned int bytes;
+			char *iov_addr;
+
+			bytes = min_t(unsigned int,
+				      iov[iov_idx].iov_len - iov_off, bv_len);
+			iov_addr = iov[iov_idx].iov_base + iov_off;
+
+			if (!ret) {
+				if (!read && !uncopy)
+					ret = copy_from_user(bv_addr, iov_addr,
+							     bytes);
+				if (read && uncopy)
+					ret = copy_to_user(iov_addr, bv_addr,
+							   bytes);
+
+				if (ret)
+					ret = -EFAULT;
+			}
+
+			bv_len -= bytes;
+			bv_addr += bytes;
+			iov_addr += bytes;
+			iov_off += bytes;
+
+			if (iov[iov_idx].iov_len == iov_off) {
+				iov_idx++;
+				iov_off = 0;
+			}
+		}
+
+		if (uncopy)
+			__free_page(bvec->bv_page);
+	}
+
+	return ret;
+}
+
 /**
  * bio_uncopy_user - finish previously mapped bio
  * @bio: bio being terminated
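A note on the new helper: __bio_copy_iov advances two cursors in
lock-step, one over the bio's segments (bv_addr, bv_len) and one over
the iovec (iov_idx, iov_off), copying min() of the bytes remaining on
either side at each step, so segment and iovec boundaries need not
line up. A rough user-space analogue of that walk, for illustration
only: copy_segments_to_iov, segs and seg_lens are invented names, and
POSIX struct iovec stands in for sg_iovec.

#include <string.h>
#include <sys/uio.h>

/* User-space analogue of the __bio_copy_iov() walk (illustration
 * only): copy a list of segments into an iovec, advancing both
 * cursors by the smaller of the bytes remaining on either side. */
static void copy_segments_to_iov(char **segs, const size_t *seg_lens,
				 int nsegs, const struct iovec *iov,
				 int iov_count)
{
	int iov_idx = 0;
	size_t iov_off = 0;

	for (int i = 0; i < nsegs; i++) {
		char *src = segs[i];
		size_t left = seg_lens[i];

		while (left && iov_idx < iov_count) {
			size_t bytes = iov[iov_idx].iov_len - iov_off;

			if (bytes > left)
				bytes = left;
			memcpy((char *)iov[iov_idx].iov_base + iov_off,
			       src, bytes);
			src += bytes;
			left -= bytes;
			iov_off += bytes;

			/* iovec element exhausted: move to the next */
			if (iov_off == iov[iov_idx].iov_len) {
				iov_idx++;
				iov_off = 0;
			}
		}
	}
}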
@@ -484,55 +547,56 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs)
 int bio_uncopy_user(struct bio *bio)
 {
 	struct bio_map_data *bmd = bio->bi_private;
-	const int read = bio_data_dir(bio) == READ;
-	struct bio_vec *bvec;
-	int i, ret = 0;
+	int ret;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
-		char *addr = page_address(bvec->bv_page);
-		unsigned int len = bmd->iovecs[i].bv_len;
+	ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
 
-		if (read && !ret && copy_to_user(bmd->userptr, addr, len))
-			ret = -EFAULT;
-
-		__free_page(bvec->bv_page);
-		bmd->userptr += len;
-	}
 	bio_free_map_data(bmd);
 	bio_put(bio);
 	return ret;
 }
 
 /**
- * bio_copy_user - copy user data to bio
+ * bio_copy_user_iov - copy user data to bio
  * @q: destination block queue
- * @uaddr: start of user address
- * @len: length in bytes
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
  * @write_to_vm: bool indicating writing to pages or not
  *
  * Prepares and returns a bio for indirect user io, bouncing data
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
-			  unsigned int len, int write_to_vm)
+struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
+			      int iov_count, int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
 	struct bio_map_data *bmd;
 	struct bio_vec *bvec;
 	struct page *page;
 	struct bio *bio;
 	int i, ret;
+	int nr_pages = 0;
+	unsigned int len = 0;
 
-	bmd = bio_alloc_map_data(end - start);
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr;
+		unsigned long end;
+		unsigned long start;
+
+		uaddr = (unsigned long)iov[i].iov_base;
+		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		len += iov[i].iov_len;
+	}
+
+	bmd = bio_alloc_map_data(nr_pages, iov_count);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
-	bmd->userptr = (void __user *) uaddr;
-
 	ret = -ENOMEM;
-	bio = bio_alloc(GFP_KERNEL, end - start);
+	bio = bio_alloc(GFP_KERNEL, nr_pages);
 	if (!bio)
 		goto out_bmd;
 
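A worked example of the page accounting above, for illustration only
and assuming 4 KiB pages (PAGE_SHIFT == 12): user buffers need not be
page-aligned, so each iovec is rounded out to every page it touches.

/* Illustration only; assumes PAGE_SIZE == 4096, PAGE_SHIFT == 12.
 * An 8-byte iovec at user address 0x1ffc straddles a page boundary: */
unsigned long uaddr = 0x1ffc, iov_len = 8;
unsigned long end = (uaddr + iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; /* 3 */
unsigned long start = uaddr >> PAGE_SHIFT;                           /* 1 */
/* this iovec contributes end - start == 2 pages to nr_pages */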
@@ -564,22 +628,12 @@ struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
 	 * success
 	 */
 	if (!write_to_vm) {
-		char __user *p = (char __user *) uaddr;
-
-		/*
-		 * for a write, copy in data to kernel pages
-		 */
-		ret = -EFAULT;
-		bio_for_each_segment(bvec, bio, i) {
-			char *addr = page_address(bvec->bv_page);
-
-			if (copy_from_user(addr, p, bvec->bv_len))
-				goto cleanup;
-			p += bvec->bv_len;
-		}
+		ret = __bio_copy_iov(bio, iov, iov_count, 0);
+		if (ret)
+			goto cleanup;
 	}
 
-	bio_set_map_data(bmd, bio);
+	bio_set_map_data(bmd, bio, iov, iov_count);
 	return bio;
 cleanup:
 	bio_for_each_segment(bvec, bio, i)
@@ -591,6 +645,28 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with
+ * call bio_uncopy_user() on io completion.
+ */
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
+			  unsigned int len, int write_to_vm)
+{
+	struct sg_iovec iov;
+
+	iov.iov_base = (void __user *)uaddr;
+	iov.iov_len = len;
+
+	return bio_copy_user_iov(q, &iov, 1, write_to_vm);
+}
+
 static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4c59bdccd3ee..d259690863fb 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -327,6 +327,8 @@ extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
+extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
+				     int, int);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 