author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-12 13:30:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-12 13:30:18 -0400
commit     16b9057804c02e2d351e9c8f606e909b43cbd9e7 (patch)
tree       a3ac6e1d9d57a8abf4267e5ead3f2de1309335eb /fs/nfs/direct.c
parent     5c02c392cd2320e8d612376d6b72b6548a680923 (diff)
parent     c2338f2dc7c1e9f6202f370c64ffd7f44f3d4b51 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs updates from Al Viro:
"This the bunch that sat in -next + lock_parent() fix. This is the
minimal set; there's more pending stuff.
In particular, I really hope to get acct.c fixes merged this cycle -
we need that to deal sanely with delayed-mntput stuff. In the next
pile, hopefully - that series is fairly short and localized
(kernel/acct.c, fs/super.c and fs/namespace.c). In this pile: more
iov_iter work. Most of the prereqs for ->splice_write with sane locking
order are there and Kent's dio rewrite would also fit nicely on top of
this pile"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (70 commits)
lock_parent: don't step on stale ->d_parent of all-but-freed one
kill generic_file_splice_write()
ceph: switch to iter_file_splice_write()
shmem: switch to iter_file_splice_write()
nfs: switch to iter_file_splice_write()
fs/splice.c: remove unneeded exports
ocfs2: switch to iter_file_splice_write()
->splice_write() via ->write_iter()
bio_vec-backed iov_iter
optimize copy_page_{to,from}_iter()
bury generic_file_aio_{read,write}
lustre: get rid of messing with iovecs
ceph: switch to ->write_iter()
ceph_sync_direct_write: stop poking into iov_iter guts
ceph_sync_read: stop poking into iov_iter guts
new helper: copy_page_from_iter()
fuse: switch to ->write_iter()
btrfs: switch to ->write_iter()
ocfs2: switch to ->write_iter()
xfs: switch to ->write_iter()
...
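
The unifying idea behind the iov_iter work pulled in here: instead of threading an (iov, nr_segs, pos) triple through every filesystem entry point, callers hand down a single iterator that carries the segment array, the offset into the current segment, and the remaining byte count, and that knows how to advance itself. A toy userspace model of that bookkeeping (illustrative only; the toy_iter_* names are invented, not the kernel API):

#include <stdio.h>
#include <sys/uio.h>

/* Toy model of what struct iov_iter centralizes: the segment array,
 * the offset into the current segment, and the bytes remaining, so
 * callees stop juggling a separate (iov, nr_segs, pos) triple. */
struct toy_iter {
        const struct iovec *iov;
        unsigned long nr_segs;
        size_t iov_offset;      /* offset into iov[0] */
        size_t count;           /* bytes remaining */
};

/* Consume `bytes` from the front, stepping over exhausted segments;
 * this is the bookkeeping the kernel's iov_iter_advance() does. */
static void toy_iter_advance(struct toy_iter *i, size_t bytes)
{
        while (bytes && i->nr_segs) {
                size_t seg = i->iov->iov_len - i->iov_offset;

                if (seg > bytes)
                        seg = bytes;
                i->iov_offset += seg;
                i->count -= seg;
                bytes -= seg;
                if (i->iov_offset == i->iov->iov_len) {
                        i->iov++;
                        i->nr_segs--;
                        i->iov_offset = 0;
                }
        }
}

int main(void)
{
        char a[8], b[16];
        struct iovec v[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        struct toy_iter i = { v, 2, 0, sizeof(a) + sizeof(b) };

        toy_iter_advance(&i, 12);       /* crosses the a->b segment boundary */
        printf("remaining=%zu nr_segs=%lu offset=%zu\n",
               i.count, i.nr_segs, i.iov_offset);      /* 12, 1, 4 */
        return 0;
}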
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--    fs/nfs/direct.c    326
1 file changed, 102 insertions, 224 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4ad7bc388679..8f98138cbc43 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -212,20 +212,20 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  * shunt off direct read and write requests before the VFS gets them,
  * so this method is only ever called for swap.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
 #ifndef CONFIG_NFS_SWAP
         dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
-                        iocb->ki_filp, (long long) pos, nr_segs);
+                        iocb->ki_filp, (long long) pos, iter->nr_segs);
 
         return -EINVAL;
 #else
         VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
 
         if (rw == READ || rw == KERNEL_READ)
-                return nfs_file_direct_read(iocb, iov, nr_segs, pos,
+                return nfs_file_direct_read(iocb, iter, pos,
                                 rw == READ ? true : false);
-        return nfs_file_direct_write(iocb, iov, nr_segs, pos,
+        return nfs_file_direct_write(iocb, iter, pos,
                         rw == WRITE ? true : false);
 #endif /* CONFIG_NFS_SWAP */
 }
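
The hunk above is the pattern repeated throughout this merge: the (const struct iovec *, nr_segs) pair collapses into a single struct iov_iter *, and segment state such as nr_segs is read off the iterator where still needed. For reference, the ->direct_IO() address_space operation that nfs_direct_IO() implements was reshaped the same way in this series; the 3.16-era prototype is, to the best of recollection (treat as illustrative, not authoritative):

/* include/linux/fs.h, struct address_space_operations (3.16-era,
 * quoted from memory): */
ssize_t (*direct_IO)(int rw, struct kiocb *iocb,
                     struct iov_iter *iter, loff_t offset);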
@@ -414,60 +414,37 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
  * handled automatically by nfs_direct_read_result().  Otherwise, if
  * no requests have been sent, just return an error.
  */
-static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
-                                                const struct iovec *iov,
-                                                loff_t pos, bool uio)
-{
-        struct nfs_direct_req *dreq = desc->pg_dreq;
-        struct nfs_open_context *ctx = dreq->ctx;
-        struct inode *inode = ctx->dentry->d_inode;
-        unsigned long user_addr = (unsigned long)iov->iov_base;
-        size_t count = iov->iov_len;
-        size_t rsize = NFS_SERVER(inode)->rsize;
-        unsigned int pgbase;
-        int result;
-        ssize_t started = 0;
-        struct page **pagevec = NULL;
-        unsigned int npages;
-
-        do {
-                size_t bytes;
-                int i;
 
-                pgbase = user_addr & ~PAGE_MASK;
-                bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
+static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+                                              struct iov_iter *iter,
+                                              loff_t pos)
+{
+        struct nfs_pageio_descriptor desc;
+        struct inode *inode = dreq->inode;
+        ssize_t result = -EINVAL;
+        size_t requested_bytes = 0;
+        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
 
-                result = -ENOMEM;
-                npages = nfs_page_array_len(pgbase, bytes);
-                if (!pagevec)
-                        pagevec = kmalloc(npages * sizeof(struct page *),
-                                          GFP_KERNEL);
-                if (!pagevec)
-                        break;
-                if (uio) {
-                        down_read(&current->mm->mmap_sem);
-                        result = get_user_pages(current, current->mm, user_addr,
-                                        npages, 1, 0, pagevec, NULL);
-                        up_read(&current->mm->mmap_sem);
-                        if (result < 0)
-                                break;
-                } else {
-                        WARN_ON(npages != 1);
-                        result = get_kernel_page(user_addr, 1, pagevec);
-                        if (WARN_ON(result != 1))
-                                break;
-                }
+        nfs_pageio_init_read(&desc, dreq->inode, false,
+                             &nfs_direct_read_completion_ops);
+        get_dreq(dreq);
+        desc.pg_dreq = dreq;
+        atomic_inc(&inode->i_dio_count);
 
-                if ((unsigned)result < npages) {
-                        bytes = result * PAGE_SIZE;
-                        if (bytes <= pgbase) {
-                                nfs_direct_release_pages(pagevec, result);
-                                break;
-                        }
-                        bytes -= pgbase;
-                        npages = result;
-                }
+        while (iov_iter_count(iter)) {
+                struct page **pagevec;
+                size_t bytes;
+                size_t pgbase;
+                unsigned npages, i;
 
+                result = iov_iter_get_pages_alloc(iter, &pagevec,
+                                                  rsize, &pgbase);
+                if (result < 0)
+                        break;
+
+                bytes = result;
+                iov_iter_advance(iter, bytes);
+                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                 for (i = 0; i < npages; i++) {
                         struct nfs_page *req;
                         unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
@@ -480,56 +457,21 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
                         }
                         req->wb_index = pos >> PAGE_SHIFT;
                         req->wb_offset = pos & ~PAGE_MASK;
-                        if (!nfs_pageio_add_request(desc, req)) {
-                                result = desc->pg_error;
+                        if (!nfs_pageio_add_request(&desc, req)) {
+                                result = desc.pg_error;
                                 nfs_release_request(req);
                                 break;
                         }
                         pgbase = 0;
                         bytes -= req_len;
-                        started += req_len;
-                        user_addr += req_len;
+                        requested_bytes += req_len;
                         pos += req_len;
-                        count -= req_len;
                         dreq->bytes_left -= req_len;
                 }
-                /* The nfs_page now hold references to these pages */
                 nfs_direct_release_pages(pagevec, npages);
-        } while (count != 0 && result >= 0);
-
-        kfree(pagevec);
-
-        if (started)
-                return started;
-        return result < 0 ? (ssize_t) result : -EFAULT;
-}
-
-static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
-                                              const struct iovec *iov,
-                                              unsigned long nr_segs,
-                                              loff_t pos, bool uio)
-{
-        struct nfs_pageio_descriptor desc;
-        struct inode *inode = dreq->inode;
-        ssize_t result = -EINVAL;
-        size_t requested_bytes = 0;
-        unsigned long seg;
-
-        nfs_pageio_init_read(&desc, dreq->inode, false,
-                             &nfs_direct_read_completion_ops);
-        get_dreq(dreq);
-        desc.pg_dreq = dreq;
-        atomic_inc(&inode->i_dio_count);
-
-        for (seg = 0; seg < nr_segs; seg++) {
-                const struct iovec *vec = &iov[seg];
-                result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
+                kvfree(pagevec);
                 if (result < 0)
                         break;
-                requested_bytes += result;
-                if ((size_t)result < vec->iov_len)
-                        break;
-                pos += vec->iov_len;
         }
 
         nfs_pageio_complete(&desc);
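
The new read loop replaces the hand-rolled get_user_pages() segment walker with one call: iov_iter_get_pages_alloc() pins up to rsize bytes of the iterator's buffers, allocates the page array itself, and returns the byte count pinned plus the offset (pgbase) of the data within the first page; iov_iter_advance() then consumes those bytes. The only arithmetic left to the caller is recovering the page count, and that rounding is easy to sanity-check as a standalone C program:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* The page-count recovery used in the loop above: `result` bytes were
 * pinned starting `pgbase` bytes into the first page, so the pinned
 * span covers result + pgbase bytes of page-aligned space, rounded up. */
static unsigned long npages_for(unsigned long result, unsigned long pgbase)
{
        return (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        printf("%lu\n", npages_for(5000, 564)); /* spans 5564 bytes -> 2 pages */
        printf("%lu\n", npages_for(4096, 0));   /* exactly one page  -> 1 */
        printf("%lu\n", npages_for(1, 4095));   /* last byte of page -> 1 */
        return 0;
}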
@@ -552,8 +494,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 /**
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers into which to read data
- * @nr_segs: size of iov vector
+ * @iter: vector of user buffers into which to read data
  * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
@@ -570,8 +511,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                                unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t pos, bool uio)
 {
         struct file *file = iocb->ki_filp;
         struct address_space *mapping = file->f_mapping;
@@ -579,9 +520,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
         struct nfs_direct_req *dreq;
         struct nfs_lock_context *l_ctx;
         ssize_t result = -EINVAL;
-        size_t count;
-
-        count = iov_length(iov, nr_segs);
+        size_t count = iov_iter_count(iter);
         nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
         dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
@@ -604,7 +543,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
                 goto out_unlock;
 
         dreq->inode = inode;
-        dreq->bytes_left = iov_length(iov, nr_segs);
+        dreq->bytes_left = count;
         dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
         l_ctx = nfs_get_lock_context(dreq->ctx);
         if (IS_ERR(l_ctx)) {
@@ -615,8 +554,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
         if (!is_sync_kiocb(iocb))
                 dreq->iocb = iocb;
 
-        NFS_I(inode)->read_io += iov_length(iov, nr_segs);
-        result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+        NFS_I(inode)->read_io += count;
+        result = nfs_direct_read_schedule_iovec(dreq, iter, pos);
 
         mutex_unlock(&inode->i_mutex);
 
@@ -772,108 +711,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 }
 #endif
 
-/*
- * NB: Return the value of the first error return code.  Subsequent
- *     errors after the first one are ignored.
- */
-/*
- * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
- * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
- * bail and stop sending more writes.  Write length accounting is
- * handled automatically by nfs_direct_write_result().  Otherwise, if
- * no requests have been sent, just return an error.
- */
-static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
-                                                 const struct iovec *iov,
-                                                 loff_t pos, bool uio)
-{
-        struct nfs_direct_req *dreq = desc->pg_dreq;
-        struct nfs_open_context *ctx = dreq->ctx;
-        struct inode *inode = ctx->dentry->d_inode;
-        unsigned long user_addr = (unsigned long)iov->iov_base;
-        size_t count = iov->iov_len;
-        size_t wsize = NFS_SERVER(inode)->wsize;
-        unsigned int pgbase;
-        int result;
-        ssize_t started = 0;
-        struct page **pagevec = NULL;
-        unsigned int npages;
-
-        do {
-                size_t bytes;
-                int i;
-
-                pgbase = user_addr & ~PAGE_MASK;
-                bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
-
-                result = -ENOMEM;
-                npages = nfs_page_array_len(pgbase, bytes);
-                if (!pagevec)
-                        pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
-                if (!pagevec)
-                        break;
-
-                if (uio) {
-                        down_read(&current->mm->mmap_sem);
-                        result = get_user_pages(current, current->mm, user_addr,
-                                                npages, 0, 0, pagevec, NULL);
-                        up_read(&current->mm->mmap_sem);
-                        if (result < 0)
-                                break;
-                } else {
-                        WARN_ON(npages != 1);
-                        result = get_kernel_page(user_addr, 0, pagevec);
-                        if (WARN_ON(result != 1))
-                                break;
-                }
-
-                if ((unsigned)result < npages) {
-                        bytes = result * PAGE_SIZE;
-                        if (bytes <= pgbase) {
-                                nfs_direct_release_pages(pagevec, result);
-                                break;
-                        }
-                        bytes -= pgbase;
-                        npages = result;
-                }
-
-                for (i = 0; i < npages; i++) {
-                        struct nfs_page *req;
-                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
-
-                        req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
-                                                 pgbase, req_len);
-                        if (IS_ERR(req)) {
-                                result = PTR_ERR(req);
-                                break;
-                        }
-                        nfs_lock_request(req);
-                        req->wb_index = pos >> PAGE_SHIFT;
-                        req->wb_offset = pos & ~PAGE_MASK;
-                        if (!nfs_pageio_add_request(desc, req)) {
-                                result = desc->pg_error;
-                                nfs_unlock_and_release_request(req);
-                                break;
-                        }
-                        pgbase = 0;
-                        bytes -= req_len;
-                        started += req_len;
-                        user_addr += req_len;
-                        pos += req_len;
-                        count -= req_len;
-                        dreq->bytes_left -= req_len;
-                }
-                /* The nfs_page now hold references to these pages */
-                nfs_direct_release_pages(pagevec, npages);
-        } while (count != 0 && result >= 0);
-
-        kfree(pagevec);
-
-        if (started)
-                return started;
-        return result < 0 ? (ssize_t) result : -EFAULT;
-}
-
 static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 {
         struct nfs_direct_req *dreq = hdr->dreq;
@@ -956,16 +793,27 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
         .completion = nfs_direct_write_completion,
 };
 
+
+/*
+ * NB: Return the value of the first error return code.  Subsequent
+ *     errors after the first one are ignored.
+ */
+/*
+ * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
+ * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
+ * bail and stop sending more writes.  Write length accounting is
+ * handled automatically by nfs_direct_write_result().  Otherwise, if
+ * no requests have been sent, just return an error.
+ */
 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
-                                               const struct iovec *iov,
-                                               unsigned long nr_segs,
-                                               loff_t pos, bool uio)
+                                               struct iov_iter *iter,
+                                               loff_t pos)
 {
         struct nfs_pageio_descriptor desc;
         struct inode *inode = dreq->inode;
         ssize_t result = 0;
         size_t requested_bytes = 0;
-        unsigned long seg;
+        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
 
         nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
                               &nfs_direct_write_completion_ops);
@@ -973,16 +821,49 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
         get_dreq(dreq);
         atomic_inc(&inode->i_dio_count);
 
-        NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
-        for (seg = 0; seg < nr_segs; seg++) {
-                const struct iovec *vec = &iov[seg];
-                result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
+        NFS_I(inode)->write_io += iov_iter_count(iter);
+        while (iov_iter_count(iter)) {
+                struct page **pagevec;
+                size_t bytes;
+                size_t pgbase;
+                unsigned npages, i;
+
+                result = iov_iter_get_pages_alloc(iter, &pagevec,
+                                                  wsize, &pgbase);
                 if (result < 0)
                         break;
-                requested_bytes += result;
-                if ((size_t)result < vec->iov_len)
+
+                bytes = result;
+                iov_iter_advance(iter, bytes);
+                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
+                for (i = 0; i < npages; i++) {
+                        struct nfs_page *req;
+                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
+
+                        req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
+                                                 pgbase, req_len);
+                        if (IS_ERR(req)) {
+                                result = PTR_ERR(req);
+                                break;
+                        }
+                        nfs_lock_request(req);
+                        req->wb_index = pos >> PAGE_SHIFT;
+                        req->wb_offset = pos & ~PAGE_MASK;
+                        if (!nfs_pageio_add_request(&desc, req)) {
+                                result = desc.pg_error;
+                                nfs_unlock_and_release_request(req);
+                                break;
+                        }
+                        pgbase = 0;
+                        bytes -= req_len;
+                        requested_bytes += req_len;
+                        pos += req_len;
+                        dreq->bytes_left -= req_len;
+                }
+                nfs_direct_release_pages(pagevec, npages);
+                kvfree(pagevec);
+                if (result < 0)
                         break;
-                pos += vec->iov_len;
         }
         nfs_pageio_complete(&desc);
 
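
The write loop mirrors the read side, including one easy-to-miss detail: the page array is freed with kvfree() rather than kfree(), because iov_iter_get_pages_alloc() tries kmalloc() for the array and falls back to vmalloc() when the request covers many pages, and only kvfree() routes each pointer back to the matching free. A rough userspace model of that fallback (all names are stand-ins; both arms are plain malloc() here):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kmalloc-then-vmalloc fallback inside
 * iov_iter_get_pages_alloc(): small arrays come from one allocator,
 * big ones from another, so the caller needs a free that copes with
 * both, which is what kvfree() provides in the kernel. */
static void *get_pages_array(size_t n, int *vmalloced)
{
        void *p = NULL;

        if (n <= 512)   /* pretend the slab only serves small arrays */
                p = malloc(n * sizeof(void *)); /* "kmalloc" arm */
        *vmalloced = (p == NULL);
        if (!p)
                p = malloc(n * sizeof(void *)); /* "vmalloc" fallback arm */
        return p;
}

int main(void)
{
        int vm;
        void *vec = get_pages_array(1024, &vm);

        printf("used fallback allocator: %s\n", vm ? "yes" : "no");
        /* kvfree() checks is_vmalloc_addr() and picks kfree() or
         * vfree(); a single free() handles both arms in this model. */
        free(vec);
        return 0;
}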
@@ -1004,8 +885,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 /**
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers from which to write data
- * @nr_segs: size of iov vector
+ * @iter: vector of user buffers from which to write data
  * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
@@ -1023,8 +903,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                                unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t pos, bool uio)
 {
         ssize_t result = -EINVAL;
         struct file *file = iocb->ki_filp;
@@ -1033,9 +913,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         struct nfs_direct_req *dreq;
         struct nfs_lock_context *l_ctx;
         loff_t end;
-        size_t count;
-
-        count = iov_length(iov, nr_segs);
+        size_t count = iov_iter_count(iter);
         end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
 
         nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
@@ -1086,7 +964,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         if (!is_sync_kiocb(iocb))
                 dreq->iocb = iocb;
 
-        result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+        result = nfs_direct_write_schedule_iovec(dreq, iter, pos);
 
         if (mapping->nrpages) {
                 invalidate_inode_pages2_range(mapping,