Diffstat (limited to 'fs/fuse/file.c')
-rw-r--r--  fs/fuse/file.c  491
1 file changed, 378 insertions(+), 113 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f3ab824fa302..d1c9b85b3f58 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/swap.h>
+#include <linux/aio.h>
 
 static const struct file_operations fuse_direct_io_file_operations;
 
@@ -25,7 +26,7 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
         struct fuse_req *req;
         int err;
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
@@ -57,7 +58,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
                 return NULL;
 
         ff->fc = fc;
-        ff->reserved_req = fuse_request_alloc();
+        ff->reserved_req = fuse_request_alloc(0);
         if (unlikely(!ff->reserved_req)) {
                 kfree(ff);
                 return NULL;
@@ -126,11 +127,13 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
                 struct fuse_req *req = ff->reserved_req;
 
                 if (sync) {
+                        req->background = 0;
                         fuse_request_send(ff->fc, req);
                         path_put(&req->misc.release.path);
                         fuse_put_request(ff->fc, req);
                 } else {
                         req->end = fuse_release_end;
+                        req->background = 1;
                         fuse_request_send_background(ff->fc, req);
                 }
                 kfree(ff);
@@ -282,6 +285,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
         WARN_ON(atomic_read(&ff->count) > 1);
         fuse_prepare_release(ff, flags, FUSE_RELEASE);
         ff->reserved_req->force = 1;
+        ff->reserved_req->background = 0;
         fuse_request_send(ff->fc, ff->reserved_req);
         fuse_put_request(ff->fc, ff->reserved_req);
         kfree(ff);
@@ -355,7 +359,7 @@ static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
 
 static int fuse_flush(struct file *file, fl_owner_t id)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_file *ff = file->private_data;
         struct fuse_req *req;
@@ -368,7 +372,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
         if (fc->no_flush)
                 return 0;
 
-        req = fuse_get_req_nofail(fc, file);
+        req = fuse_get_req_nofail_nopages(fc, file);
         memset(&inarg, 0, sizeof(inarg));
         inarg.fh = ff->fh;
         inarg.lock_owner = fuse_lock_owner_id(fc, id);
@@ -436,7 +440,7 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
 
         fuse_sync_writes(inode);
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req)) {
                 err = PTR_ERR(req);
                 goto out;
@@ -491,9 +495,115 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
         req->out.args[0].size = count;
 }
 
-static size_t fuse_send_read(struct fuse_req *req, struct file *file,
+static void fuse_release_user_pages(struct fuse_req *req, int write)
+{
+        unsigned i;
+
+        for (i = 0; i < req->num_pages; i++) {
+                struct page *page = req->pages[i];
+                if (write)
+                        set_page_dirty_lock(page);
+                put_page(page);
+        }
+}
+
+/**
+ * In case of short read, the caller sets 'pos' to the position of
+ * actual end of fuse request in IO request. Otherwise, if bytes_requested
+ * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
+ *
+ * An example:
+ * User requested DIO read of 64K. It was splitted into two 32K fuse requests,
+ * both submitted asynchronously. The first of them was ACKed by userspace as
+ * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
+ * second request was ACKed as short, e.g. only 1K was read, resulting in
+ * pos == 33K.
+ *
+ * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
+ * will be equal to the length of the longest contiguous fragment of
+ * transferred data starting from the beginning of IO request.
+ */
+static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+{
+        int left;
+
+        spin_lock(&io->lock);
+        if (err)
+                io->err = io->err ? : err;
+        else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
+                io->bytes = pos;
+
+        left = --io->reqs;
+        spin_unlock(&io->lock);
+
+        if (!left) {
+                long res;
+
+                if (io->err)
+                        res = io->err;
+                else if (io->bytes >= 0 && io->write)
+                        res = -EIO;
+                else {
+                        res = io->bytes < 0 ? io->size : io->bytes;
+
+                        if (!is_sync_kiocb(io->iocb)) {
+                                struct path *path = &io->iocb->ki_filp->f_path;
+                                struct inode *inode = path->dentry->d_inode;
+                                struct fuse_conn *fc = get_fuse_conn(inode);
+                                struct fuse_inode *fi = get_fuse_inode(inode);
+
+                                spin_lock(&fc->lock);
+                                fi->attr_version = ++fc->attr_version;
+                                spin_unlock(&fc->lock);
+                        }
+                }
+
+                aio_complete(io->iocb, res, 0);
+                kfree(io);
+        }
+}
+
+static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+{
+        struct fuse_io_priv *io = req->io;
+        ssize_t pos = -1;
+
+        fuse_release_user_pages(req, !io->write);
+
+        if (io->write) {
+                if (req->misc.write.in.size != req->misc.write.out.size)
+                        pos = req->misc.write.in.offset - io->offset +
+                                req->misc.write.out.size;
+        } else {
+                if (req->misc.read.in.size != req->out.args[0].size)
+                        pos = req->misc.read.in.offset - io->offset +
+                                req->out.args[0].size;
+        }
+
+        fuse_aio_complete(io, req->out.h.error, pos);
+}
+
+static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+                size_t num_bytes, struct fuse_io_priv *io)
+{
+        spin_lock(&io->lock);
+        io->size += num_bytes;
+        io->reqs++;
+        spin_unlock(&io->lock);
+
+        req->io = io;
+        req->end = fuse_aio_complete_req;
+
+        __fuse_get_request(req);
+        fuse_request_send_background(fc, req);
+
+        return num_bytes;
+}
+
+static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
                              loff_t pos, size_t count, fl_owner_t owner)
 {
+        struct file *file = io->file;
         struct fuse_file *ff = file->private_data;
         struct fuse_conn *fc = ff->fc;
 
@@ -504,6 +614,10 @@ static size_t fuse_send_read(struct fuse_req *req, struct file *file,
                 inarg->read_flags |= FUSE_READ_LOCKOWNER;
                 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
         }
+
+        if (io->async)
+                return fuse_async_req_send(fc, req, count, io);
+
         fuse_request_send(fc, req);
         return req->out.args[0].size;
 }
@@ -524,6 +638,7 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
 
 static int fuse_readpage(struct file *file, struct page *page)
 {
+        struct fuse_io_priv io = { .async = 0, .file = file };
         struct inode *inode = page->mapping->host;
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_req *req;
@@ -544,7 +659,7 @@ static int fuse_readpage(struct file *file, struct page *page)
          */
         fuse_wait_on_page_writeback(inode, page->index);
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req(fc, 1);
         err = PTR_ERR(req);
         if (IS_ERR(req))
                 goto out;
@@ -555,7 +670,8 @@ static int fuse_readpage(struct file *file, struct page *page)
         req->out.argpages = 1;
         req->num_pages = 1;
         req->pages[0] = page;
-        num_read = fuse_send_read(req, file, pos, count, NULL);
+        req->page_descs[0].length = count;
+        num_read = fuse_send_read(req, &io, pos, count, NULL);
         err = req->out.h.error;
         fuse_put_request(fc, req);
 
@@ -641,6 +757,7 @@ struct fuse_fill_data {
         struct fuse_req *req;
         struct file *file;
         struct inode *inode;
+        unsigned nr_pages;
 };
 
 static int fuse_readpages_fill(void *_data, struct page *page)
@@ -656,16 +773,31 @@ static int fuse_readpages_fill(void *_data, struct page *page)
             (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
              (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
              req->pages[req->num_pages - 1]->index + 1 != page->index)) {
+                int nr_alloc = min_t(unsigned, data->nr_pages,
+                                     FUSE_MAX_PAGES_PER_REQ);
                 fuse_send_readpages(req, data->file);
-                data->req = req = fuse_get_req(fc);
+                if (fc->async_read)
+                        req = fuse_get_req_for_background(fc, nr_alloc);
+                else
+                        req = fuse_get_req(fc, nr_alloc);
+
+                data->req = req;
                 if (IS_ERR(req)) {
                         unlock_page(page);
                         return PTR_ERR(req);
                 }
         }
+
+        if (WARN_ON(req->num_pages >= req->max_pages)) {
+                fuse_put_request(fc, req);
+                return -EIO;
+        }
+
         page_cache_get(page);
         req->pages[req->num_pages] = page;
+        req->page_descs[req->num_pages].length = PAGE_SIZE;
         req->num_pages++;
+        data->nr_pages--;
         return 0;
 }
 
@@ -676,6 +808,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_fill_data data;
         int err;
+        int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);
 
         err = -EIO;
         if (is_bad_inode(inode))
@@ -683,7 +816,11 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
 
         data.file = file;
         data.inode = inode;
-        data.req = fuse_get_req(fc);
+        if (fc->async_read)
+                data.req = fuse_get_req_for_background(fc, nr_alloc);
+        else
+                data.req = fuse_get_req(fc, nr_alloc);
+        data.nr_pages = nr_pages;
         err = PTR_ERR(data.req);
         if (IS_ERR(data.req))
                 goto out;
@@ -744,9 +881,10 @@ static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
         req->out.args[0].value = outarg;
 }
 
-static size_t fuse_send_write(struct fuse_req *req, struct file *file,
+static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
                               loff_t pos, size_t count, fl_owner_t owner)
 {
+        struct file *file = io->file;
         struct fuse_file *ff = file->private_data;
         struct fuse_conn *fc = ff->fc;
         struct fuse_write_in *inarg = &req->misc.write.in;
@@ -757,6 +895,10 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
                 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
                 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
         }
+
+        if (io->async)
+                return fuse_async_req_send(fc, req, count, io);
+
         fuse_request_send(fc, req);
         return req->misc.write.out.size;
 }
@@ -780,13 +922,14 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
         size_t res;
         unsigned offset;
         unsigned i;
+        struct fuse_io_priv io = { .async = 0, .file = file };
 
         for (i = 0; i < req->num_pages; i++)
                 fuse_wait_on_page_writeback(inode, req->pages[i]->index);
 
-        res = fuse_send_write(req, file, pos, count, NULL);
+        res = fuse_send_write(req, &io, pos, count, NULL);
 
-        offset = req->page_offset;
+        offset = req->page_descs[0].offset;
         count = res;
         for (i = 0; i < req->num_pages; i++) {
                 struct page *page = req->pages[i];
@@ -817,7 +960,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
         int err;
 
         req->in.argpages = 1;
-        req->page_offset = offset;
+        req->page_descs[0].offset = offset;
 
         do {
                 size_t tmp;
@@ -857,6 +1000,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
 
                 err = 0;
                 req->pages[req->num_pages] = page;
+                req->page_descs[req->num_pages].length = tmp;
                 req->num_pages++;
 
                 iov_iter_advance(ii, tmp);
@@ -869,11 +1013,19 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                 if (!fc->big_writes)
                         break;
         } while (iov_iter_count(ii) && count < fc->max_write &&
-                 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);
+                 req->num_pages < req->max_pages && offset == 0);
 
         return count > 0 ? count : err;
 }
 
+static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
+{
+        return min_t(unsigned,
+                     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
+                     (pos >> PAGE_CACHE_SHIFT) + 1,
+                     FUSE_MAX_PAGES_PER_REQ);
+}
+
 static ssize_t fuse_perform_write(struct file *file,
                                   struct address_space *mapping,
                                   struct iov_iter *ii, loff_t pos)
@@ -889,8 +1041,9 @@ static ssize_t fuse_perform_write(struct file *file,
         do {
                 struct fuse_req *req;
                 ssize_t count;
+                unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));
 
-                req = fuse_get_req(fc);
+                req = fuse_get_req(fc, nr_pages);
                 if (IS_ERR(req)) {
                         err = PTR_ERR(req);
                         break;
@@ -947,7 +1100,6 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                 return err;
 
         count = ocount;
-        sb_start_write(inode->i_sb);
         mutex_lock(&inode->i_mutex);
 
         /* We can write back this queue in page reclaim */
@@ -1006,73 +1158,127 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 out:
         current->backing_dev_info = NULL;
         mutex_unlock(&inode->i_mutex);
-        sb_end_write(inode->i_sb);
 
         return written ? written : err;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static inline void fuse_page_descs_length_init(struct fuse_req *req,
+                unsigned index, unsigned nr_pages)
 {
-        unsigned i;
+        int i;
 
-        for (i = 0; i < req->num_pages; i++) {
-                struct page *page = req->pages[i];
-                if (write)
-                        set_page_dirty_lock(page);
-                put_page(page);
-        }
+        for (i = index; i < index + nr_pages; i++)
+                req->page_descs[i].length = PAGE_SIZE -
+                        req->page_descs[i].offset;
+}
+
+static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
+{
+        return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+}
+
+static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
+                                        size_t max_size)
+{
+        return min(iov_iter_single_seg_count(ii), max_size);
 }
 
-static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
+static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
                                size_t *nbytesp, int write)
 {
-        size_t nbytes = *nbytesp;
-        unsigned long user_addr = (unsigned long) buf;
-        unsigned offset = user_addr & ~PAGE_MASK;
-        int npages;
+        size_t nbytes = 0;  /* # bytes already packed in req */
 
         /* Special case for kernel I/O: can copy directly into the buffer */
         if (segment_eq(get_fs(), KERNEL_DS)) {
+                unsigned long user_addr = fuse_get_user_addr(ii);
+                size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
+
                 if (write)
                         req->in.args[1].value = (void *) user_addr;
                 else
                         req->out.args[0].value = (void *) user_addr;
 
+                iov_iter_advance(ii, frag_size);
+                *nbytesp = frag_size;
                 return 0;
         }
 
-        nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
-        npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
-        npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
-        if (npages < 0)
-                return npages;
+        while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
+                unsigned npages;
+                unsigned long user_addr = fuse_get_user_addr(ii);
+                unsigned offset = user_addr & ~PAGE_MASK;
+                size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
+                int ret;
+
+                unsigned n = req->max_pages - req->num_pages;
+                frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);
+
+                npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                npages = clamp(npages, 1U, n);
+
+                ret = get_user_pages_fast(user_addr, npages, !write,
+                                          &req->pages[req->num_pages]);
+                if (ret < 0)
+                        return ret;
 
-        req->num_pages = npages;
-        req->page_offset = offset;
+                npages = ret;
+                frag_size = min_t(size_t, frag_size,
+                                  (npages << PAGE_SHIFT) - offset);
+                iov_iter_advance(ii, frag_size);
+
+                req->page_descs[req->num_pages].offset = offset;
+                fuse_page_descs_length_init(req, req->num_pages, npages);
+
+                req->num_pages += npages;
+                req->page_descs[req->num_pages - 1].length -=
+                        (npages << PAGE_SHIFT) - offset - frag_size;
+
+                nbytes += frag_size;
+        }
 
         if (write)
                 req->in.argpages = 1;
         else
                 req->out.argpages = 1;
 
-        nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
-        *nbytesp = min(*nbytesp, nbytes);
+        *nbytesp = nbytes;
 
         return 0;
 }
 
-ssize_t fuse_direct_io(struct file *file, const char __user *buf,
-                       size_t count, loff_t *ppos, int write)
+static inline int fuse_iter_npages(const struct iov_iter *ii_p)
 {
+        struct iov_iter ii = *ii_p;
+        int npages = 0;
+
+        while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
+                unsigned long user_addr = fuse_get_user_addr(&ii);
+                unsigned offset = user_addr & ~PAGE_MASK;
+                size_t frag_size = iov_iter_single_seg_count(&ii);
+
+                npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                iov_iter_advance(&ii, frag_size);
+        }
+
+        return min(npages, FUSE_MAX_PAGES_PER_REQ);
+}
+
+ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
+                       unsigned long nr_segs, size_t count, loff_t *ppos,
+                       int write)
+{
+        struct file *file = io->file;
         struct fuse_file *ff = file->private_data;
         struct fuse_conn *fc = ff->fc;
         size_t nmax = write ? fc->max_write : fc->max_read;
         loff_t pos = *ppos;
         ssize_t res = 0;
         struct fuse_req *req;
+        struct iov_iter ii;
 
-        req = fuse_get_req(fc);
+        iov_iter_init(&ii, iov, nr_segs, count, 0);
+
+        req = fuse_get_req(fc, fuse_iter_npages(&ii));
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
@@ -1080,18 +1286,19 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf,
                 size_t nres;
                 fl_owner_t owner = current->files;
                 size_t nbytes = min(count, nmax);
-                int err = fuse_get_user_pages(req, buf, &nbytes, write);
+                int err = fuse_get_user_pages(req, &ii, &nbytes, write);
                 if (err) {
                         res = err;
                         break;
                 }
 
                 if (write)
-                        nres = fuse_send_write(req, file, pos, nbytes, owner);
+                        nres = fuse_send_write(req, io, pos, nbytes, owner);
                 else
-                        nres = fuse_send_read(req, file, pos, nbytes, owner);
+                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
-                fuse_release_user_pages(req, !write);
+                if (!io->async)
+                        fuse_release_user_pages(req, !write);
                 if (req->out.h.error) {
                         if (!res)
                                 res = req->out.h.error;
@@ -1103,12 +1310,11 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf,
                 count -= nres;
                 res += nres;
                 pos += nres;
-                buf += nres;
                 if (nres != nbytes)
                         break;
                 if (count) {
                         fuse_put_request(fc, req);
-                        req = fuse_get_req(fc);
+                        req = fuse_get_req(fc, fuse_iter_npages(&ii));
                         if (IS_ERR(req))
                                 break;
                 }
@@ -1122,34 +1328,45 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf,
 }
 EXPORT_SYMBOL_GPL(fuse_direct_io);
 
-static ssize_t fuse_direct_read(struct file *file, char __user *buf,
-                                size_t count, loff_t *ppos)
+static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
+                                  const struct iovec *iov,
+                                  unsigned long nr_segs, loff_t *ppos,
+                                  size_t count)
 {
         ssize_t res;
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct file *file = io->file;
+        struct inode *inode = file_inode(file);
 
         if (is_bad_inode(inode))
                 return -EIO;
 
-        res = fuse_direct_io(file, buf, count, ppos, 0);
+        res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
 
         fuse_invalidate_attr(inode);
 
         return res;
 }
 
-static ssize_t __fuse_direct_write(struct file *file, const char __user *buf,
+static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
+{
+        struct fuse_io_priv io = { .async = 0, .file = file };
+        struct iovec iov = { .iov_base = buf, .iov_len = count };
+        return __fuse_direct_read(&io, &iov, 1, ppos, count);
+}
+
+static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
+                                   const struct iovec *iov,
+                                   unsigned long nr_segs, loff_t *ppos)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct file *file = io->file;
+        struct inode *inode = file_inode(file);
+        size_t count = iov_length(iov, nr_segs);
         ssize_t res;
 
         res = generic_write_checks(file, ppos, &count, 0);
-        if (!res) {
-                res = fuse_direct_io(file, buf, count, ppos, 1);
-                if (res > 0)
-                        fuse_write_update_size(inode, *ppos);
-        }
+        if (!res)
+                res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
 
         fuse_invalidate_attr(inode);
 
@@ -1159,15 +1376,19 @@ static ssize_t __fuse_direct_write(struct file *file, const char __user *buf,
 static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                  size_t count, loff_t *ppos)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+        struct inode *inode = file_inode(file);
         ssize_t res;
+        struct fuse_io_priv io = { .async = 0, .file = file };
 
         if (is_bad_inode(inode))
                 return -EIO;
 
         /* Don't allow parallel writes to the same file */
         mutex_lock(&inode->i_mutex);
-        res = __fuse_direct_write(file, buf, count, ppos);
+        res = __fuse_direct_write(&io, &iov, 1, ppos);
+        if (res > 0)
+                fuse_write_update_size(inode, *ppos);
         mutex_unlock(&inode->i_mutex);
 
         return res;
@@ -1272,10 +1493,11 @@ static int fuse_writepage_locked(struct page *page)
 
         set_page_writeback(page);
 
-        req = fuse_request_alloc_nofs();
+        req = fuse_request_alloc_nofs(1);
         if (!req)
                 goto err;
 
+        req->background = 1; /* writeback always goes to bg_queue */
         tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
         if (!tmp_page)
                 goto err_free;
@@ -1293,7 +1515,8 @@ static int fuse_writepage_locked(struct page *page)
         req->in.argpages = 1;
         req->num_pages = 1;
         req->pages[0] = tmp_page;
-        req->page_offset = 0;
+        req->page_descs[0].offset = 0;
+        req->page_descs[0].length = PAGE_SIZE;
         req->end = fuse_writepage_end;
         req->inode = inode;
 
@@ -1385,7 +1608,7 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
-                struct inode *inode = file->f_dentry->d_inode;
+                struct inode *inode = file_inode(file);
                 struct fuse_conn *fc = get_fuse_conn(inode);
                 struct fuse_inode *fi = get_fuse_inode(inode);
                 struct fuse_file *ff = file->private_data;
@@ -1443,7 +1666,7 @@ static void fuse_lk_fill(struct fuse_req *req, struct file *file,
                          const struct file_lock *fl, int opcode, pid_t pid,
                          int flock)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_file *ff = file->private_data;
         struct fuse_lk_in *arg = &req->misc.lk_in;
@@ -1465,13 +1688,13 @@ static void fuse_lk_fill(struct fuse_req *req, struct file *file,
 
 static int fuse_getlk(struct file *file, struct file_lock *fl)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_req *req;
         struct fuse_lk_out outarg;
         int err;
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
@@ -1490,7 +1713,7 @@ static int fuse_getlk(struct file *file, struct file_lock *fl)
 
 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_req *req;
         int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
@@ -1506,7 +1729,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
         if (fl->fl_flags & FL_CLOSE)
                 return 0;
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
@@ -1522,7 +1745,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
 
 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
         int err;
 
@@ -1545,7 +1768,7 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
 
 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
         int err;
 
@@ -1575,7 +1798,7 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
         if (!inode->i_sb->s_bdev || fc->no_bmap)
                 return 0;
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req))
                 return 0;
 
@@ -1602,7 +1825,7 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
 {
         loff_t retval;
-        struct inode *inode = file->f_path.dentry->d_inode;
+        struct inode *inode = file_inode(file);
 
         /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
         if (whence == SEEK_CUR || whence == SEEK_SET)
@@ -1873,7 +2096,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                 num_pages++;
         }
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req(fc, num_pages);
         if (IS_ERR(req)) {
                 err = PTR_ERR(req);
                 req = NULL;
@@ -1881,6 +2104,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
         }
         memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
         req->num_pages = num_pages;
+        fuse_page_descs_length_init(req, 0, req->num_pages);
 
         /* okay, let's send it to the client */
         req->in.h.opcode = FUSE_IOCTL;
@@ -1978,10 +2202,10 @@ EXPORT_SYMBOL_GPL(fuse_do_ioctl);
 long fuse_ioctl_common(struct file *file, unsigned int cmd,
                        unsigned long arg, unsigned int flags)
 {
-        struct inode *inode = file->f_dentry->d_inode;
+        struct inode *inode = file_inode(file);
         struct fuse_conn *fc = get_fuse_conn(inode);
 
-        if (!fuse_allow_task(fc, current))
+        if (!fuse_allow_current_process(fc))
                 return -EACCES;
 
         if (is_bad_inode(inode))
@@ -2066,6 +2290,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
                 return DEFAULT_POLLMASK;
 
         poll_wait(file, &ff->poll_wait, wait);
+        inarg.events = (__u32)poll_requested_events(wait);
 
         /*
          * Ask for notification iff there's someone waiting for it.
@@ -2076,7 +2301,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
                 fuse_register_polled_file(fc, ff);
         }
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req))
                 return POLLERR;
 
@@ -2126,53 +2351,93 @@ int fuse_notify_poll_wakeup(struct fuse_conn *fc,
         return 0;
 }
 
-static ssize_t fuse_loop_dio(struct file *filp, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t *ppos, int rw)
+static void fuse_do_truncate(struct file *file)
 {
-        const struct iovec *vector = iov;
-        ssize_t ret = 0;
-
-        while (nr_segs > 0) {
-                void __user *base;
-                size_t len;
-                ssize_t nr;
-
-                base = vector->iov_base;
-                len = vector->iov_len;
-                vector++;
-                nr_segs--;
+        struct inode *inode = file->f_mapping->host;
+        struct iattr attr;
 
-                if (rw == WRITE)
-                        nr = __fuse_direct_write(filp, base, len, ppos);
-                else
-                        nr = fuse_direct_read(filp, base, len, ppos);
+        attr.ia_valid = ATTR_SIZE;
+        attr.ia_size = i_size_read(inode);
 
-                if (nr < 0) {
-                        if (!ret)
-                                ret = nr;
-                        break;
-                }
-                ret += nr;
-                if (nr != len)
-                        break;
-        }
+        attr.ia_file = file;
+        attr.ia_valid |= ATTR_FILE;
 
-        return ret;
+        fuse_do_setattr(inode, &attr, file);
 }
 
-
 static ssize_t
 fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                         loff_t offset, unsigned long nr_segs)
 {
         ssize_t ret = 0;
-        struct file *file = NULL;
+        struct file *file = iocb->ki_filp;
+        struct fuse_file *ff = file->private_data;
         loff_t pos = 0;
+        struct inode *inode;
+        loff_t i_size;
+        size_t count = iov_length(iov, nr_segs);
+        struct fuse_io_priv *io;
 
-        file = iocb->ki_filp;
         pos = offset;
+        inode = file->f_mapping->host;
+        i_size = i_size_read(inode);
+
+        /* optimization for short read */
+        if (rw != WRITE && offset + count > i_size) {
+                if (offset >= i_size)
+                        return 0;
+                count = i_size - offset;
+        }
+
+        io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
+        if (!io)
+                return -ENOMEM;
+        spin_lock_init(&io->lock);
+        io->reqs = 1;
+        io->bytes = -1;
+        io->size = 0;
+        io->offset = offset;
+        io->write = (rw == WRITE);
+        io->err = 0;
+        io->file = file;
+        /*
+         * By default, we want to optimize all I/Os with async request
+         * submission to the client filesystem if supported.
+         */
+        io->async = ff->fc->async_dio;
+        io->iocb = iocb;
+
+        /*
+         * We cannot asynchronously extend the size of a file. We have no method
+         * to wait on real async I/O requests, so we must submit this request
+         * synchronously.
+         */
+        if (!is_sync_kiocb(iocb) && (offset + count > i_size))
+                io->async = false;
 
-        ret = fuse_loop_dio(file, iov, nr_segs, &pos, rw);
+        if (rw == WRITE)
+                ret = __fuse_direct_write(io, iov, nr_segs, &pos);
+        else
+                ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+
+        if (io->async) {
+                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+
+                /* we have a non-extending, async request, so return */
+                if (ret > 0 && !is_sync_kiocb(iocb))
+                        return -EIOCBQUEUED;
+
+                ret = wait_on_sync_kiocb(iocb);
+        } else {
+                kfree(io);
+        }
+
+        if (rw == WRITE) {
+                if (ret > 0)
+                        fuse_write_update_size(inode, pos);
+                else if (ret < 0 && offset + count > i_size)
+                        fuse_do_truncate(file);
+        }
 
         return ret;
 }
@@ -2194,7 +2459,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
         if (fc->no_fallocate)
                 return -EOPNOTSUPP;
 
-        req = fuse_get_req(fc);
+        req = fuse_get_req_nopages(fc);
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
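
As an illustration of the accounting described in the fuse_aio_complete() comment added by this patch: each asynchronously submitted sub-request reports pos == -1 when it transferred everything it asked for, or the IO-relative offset at which it fell short, and the minimal non-negative pos across all sub-requests gives the length of the contiguous prefix that actually completed. The sketch below is a standalone user-space mock of that bookkeeping only, not part of the patch; io_acct and complete_one are made-up names, not kernel APIs.

/*
 * Standalone sketch of the "minimal non-negative pos" accounting used by
 * fuse_aio_complete(); io_acct and complete_one are illustrative names.
 */
#include <stdio.h>

struct io_acct {
        long bytes;     /* minimal non-negative pos seen so far, or -1 */
        long size;      /* total bytes submitted for the whole IO request */
};

/* Called once per completed sub-request, mirroring fuse_aio_complete(). */
static void complete_one(struct io_acct *io, long pos)
{
        if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
                io->bytes = pos;
}

int main(void)
{
        /* The scenario from the comment: a 64K DIO read split into two 32K
           requests. */
        struct io_acct io = { .bytes = -1, .size = 64 * 1024 };

        complete_one(&io, -1);          /* first 32K fully transferred */
        complete_one(&io, 33 * 1024);   /* second request short: only 1K read */

        /* Prints 33792 (33K): the contiguous prefix actually transferred. */
        printf("%ld\n", io.bytes < 0 ? io.size : io.bytes);
        return 0;
}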