Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--	fs/fuse/dev.c	527
1 file changed, 461 insertions(+), 66 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e53df5ebb2b8..9424796d6634 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -16,6 +16,9 @@
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/swap.h>
+#include <linux/splice.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -499,6 +502,9 @@ struct fuse_copy_state {
 	int write;
 	struct fuse_req *req;
 	const struct iovec *iov;
+	struct pipe_buffer *pipebufs;
+	struct pipe_buffer *currbuf;
+	struct pipe_inode_info *pipe;
 	unsigned long nr_segs;
 	unsigned long seglen;
 	unsigned long addr;
@@ -506,16 +512,16 @@ struct fuse_copy_state {
 	void *mapaddr;
 	void *buf;
 	unsigned len;
+	unsigned move_pages:1;
 };
 
 static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
-			   int write, struct fuse_req *req,
+			   int write,
 			   const struct iovec *iov, unsigned long nr_segs)
 {
 	memset(cs, 0, sizeof(*cs));
 	cs->fc = fc;
 	cs->write = write;
-	cs->req = req;
 	cs->iov = iov;
 	cs->nr_segs = nr_segs;
 }
@@ -523,7 +529,18 @@ static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
 /* Unmap and put previous page of userspace buffer */
 static void fuse_copy_finish(struct fuse_copy_state *cs)
 {
-	if (cs->mapaddr) {
+	if (cs->currbuf) {
+		struct pipe_buffer *buf = cs->currbuf;
+
+		if (!cs->write) {
+			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+		} else {
+			kunmap_atomic(cs->mapaddr, KM_USER0);
+			buf->len = PAGE_SIZE - cs->len;
+		}
+		cs->currbuf = NULL;
+		cs->mapaddr = NULL;
+	} else if (cs->mapaddr) {
 		kunmap_atomic(cs->mapaddr, KM_USER0);
 		if (cs->write) {
 			flush_dcache_page(cs->pg);
@@ -545,26 +562,61 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
 	unlock_request(cs->fc, cs->req);
 	fuse_copy_finish(cs);
-	if (!cs->seglen) {
-		BUG_ON(!cs->nr_segs);
-		cs->seglen = cs->iov[0].iov_len;
-		cs->addr = (unsigned long) cs->iov[0].iov_base;
-		cs->iov++;
-		cs->nr_segs--;
+	if (cs->pipebufs) {
+		struct pipe_buffer *buf = cs->pipebufs;
+
+		if (!cs->write) {
+			err = buf->ops->confirm(cs->pipe, buf);
+			if (err)
+				return err;
+
+			BUG_ON(!cs->nr_segs);
+			cs->currbuf = buf;
+			cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+			cs->len = buf->len;
+			cs->buf = cs->mapaddr + buf->offset;
+			cs->pipebufs++;
+			cs->nr_segs--;
+		} else {
+			struct page *page;
+
+			if (cs->nr_segs == cs->pipe->buffers)
+				return -EIO;
+
+			page = alloc_page(GFP_HIGHUSER);
+			if (!page)
+				return -ENOMEM;
+
+			buf->page = page;
+			buf->offset = 0;
+			buf->len = 0;
+
+			cs->currbuf = buf;
+			cs->mapaddr = kmap_atomic(page, KM_USER0);
+			cs->buf = cs->mapaddr;
+			cs->len = PAGE_SIZE;
+			cs->pipebufs++;
+			cs->nr_segs++;
+		}
+	} else {
+		if (!cs->seglen) {
+			BUG_ON(!cs->nr_segs);
+			cs->seglen = cs->iov[0].iov_len;
+			cs->addr = (unsigned long) cs->iov[0].iov_base;
+			cs->iov++;
+			cs->nr_segs--;
+		}
+		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+		if (err < 0)
+			return err;
+		BUG_ON(err != 1);
+		offset = cs->addr % PAGE_SIZE;
+		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
+		cs->buf = cs->mapaddr + offset;
+		cs->len = min(PAGE_SIZE - offset, cs->seglen);
+		cs->seglen -= cs->len;
+		cs->addr += cs->len;
 	}
-	down_read(&current->mm->mmap_sem);
-	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
-			     &cs->pg, NULL);
-	up_read(&current->mm->mmap_sem);
-	if (err < 0)
-		return err;
-	BUG_ON(err != 1);
-	offset = cs->addr % PAGE_SIZE;
-	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
-	cs->buf = cs->mapaddr + offset;
-	cs->len = min(PAGE_SIZE - offset, cs->seglen);
-	cs->seglen -= cs->len;
-	cs->addr += cs->len;
 
 	return lock_request(cs->fc, cs->req);
 }
@@ -586,23 +638,178 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 	return ncpy;
 }
 
+static int fuse_check_page(struct page *page)
+{
+	if (page_mapcount(page) ||
+	    page->mapping != NULL ||
+	    page_count(page) != 1 ||
+	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
+	     ~(1 << PG_locked |
+	       1 << PG_referenced |
+	       1 << PG_uptodate |
+	       1 << PG_lru |
+	       1 << PG_active |
+	       1 << PG_reclaim))) {
+		printk(KERN_WARNING "fuse: trying to steal weird page\n");
+		printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
+		return 1;
+	}
+	return 0;
+}
+
+static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+{
+	int err;
+	struct page *oldpage = *pagep;
+	struct page *newpage;
+	struct pipe_buffer *buf = cs->pipebufs;
+	struct address_space *mapping;
+	pgoff_t index;
+
+	unlock_request(cs->fc, cs->req);
+	fuse_copy_finish(cs);
+
+	err = buf->ops->confirm(cs->pipe, buf);
+	if (err)
+		return err;
+
+	BUG_ON(!cs->nr_segs);
+	cs->currbuf = buf;
+	cs->len = buf->len;
+	cs->pipebufs++;
+	cs->nr_segs--;
+
+	if (cs->len != PAGE_SIZE)
+		goto out_fallback;
+
+	if (buf->ops->steal(cs->pipe, buf) != 0)
+		goto out_fallback;
+
+	newpage = buf->page;
+
+	if (WARN_ON(!PageUptodate(newpage)))
+		return -EIO;
+
+	ClearPageMappedToDisk(newpage);
+
+	if (fuse_check_page(newpage) != 0)
+		goto out_fallback_unlock;
+
+	mapping = oldpage->mapping;
+	index = oldpage->index;
+
+	/*
+	 * This is a new and locked page, it shouldn't be mapped or
+	 * have any special flags on it
+	 */
+	if (WARN_ON(page_mapped(oldpage)))
+		goto out_fallback_unlock;
+	if (WARN_ON(page_has_private(oldpage)))
+		goto out_fallback_unlock;
+	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
+		goto out_fallback_unlock;
+	if (WARN_ON(PageMlocked(oldpage)))
+		goto out_fallback_unlock;
+
+	remove_from_page_cache(oldpage);
+	page_cache_release(oldpage);
+
+	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
+	if (err) {
+		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
+		goto out_fallback_unlock;
+	}
+	page_cache_get(newpage);
+
+	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+		lru_cache_add_file(newpage);
+
+	err = 0;
+	spin_lock(&cs->fc->lock);
+	if (cs->req->aborted)
+		err = -ENOENT;
+	else
+		*pagep = newpage;
+	spin_unlock(&cs->fc->lock);
+
+	if (err) {
+		unlock_page(newpage);
+		page_cache_release(newpage);
+		return err;
+	}
+
+	unlock_page(oldpage);
+	page_cache_release(oldpage);
+	cs->len = 0;
+
+	return 0;
+
+out_fallback_unlock:
+	unlock_page(newpage);
+out_fallback:
+	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+	cs->buf = cs->mapaddr + buf->offset;
+
+	err = lock_request(cs->fc, cs->req);
+	if (err)
+		return err;
+
+	return 1;
+}
+
+static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
+			 unsigned offset, unsigned count)
+{
+	struct pipe_buffer *buf;
+
+	if (cs->nr_segs == cs->pipe->buffers)
+		return -EIO;
+
+	unlock_request(cs->fc, cs->req);
+	fuse_copy_finish(cs);
+
+	buf = cs->pipebufs;
+	page_cache_get(page);
+	buf->page = page;
+	buf->offset = offset;
+	buf->len = count;
+
+	cs->pipebufs++;
+	cs->nr_segs++;
+	cs->len = 0;
+
+	return 0;
+}
+
 /*
  * Copy a page in the request to/from the userspace buffer. Must be
  * done atomically
  */
-static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
+static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 			  unsigned offset, unsigned count, int zeroing)
 {
+	int err;
+	struct page *page = *pagep;
+
 	if (page && zeroing && count < PAGE_SIZE) {
 		void *mapaddr = kmap_atomic(page, KM_USER1);
 		memset(mapaddr, 0, PAGE_SIZE);
 		kunmap_atomic(mapaddr, KM_USER1);
 	}
 	while (count) {
-		if (!cs->len) {
-			int err = fuse_copy_fill(cs);
-			if (err)
-				return err;
+		if (cs->write && cs->pipebufs && page) {
+			return fuse_ref_page(cs, page, offset, count);
+		} else if (!cs->len) {
+			if (cs->move_pages && page &&
+			    offset == 0 && count == PAGE_SIZE) {
+				err = fuse_try_move_page(cs, pagep);
+				if (err <= 0)
+					return err;
+			} else {
+				err = fuse_copy_fill(cs);
+				if (err)
+					return err;
+			}
 		}
 		if (page) {
 			void *mapaddr = kmap_atomic(page, KM_USER1);
@@ -627,8 +834,10 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
 	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
 
 	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
-		struct page *page = req->pages[i];
-		int err = fuse_copy_page(cs, page, offset, count, zeroing);
+		int err;
+
+		err = fuse_copy_page(cs, &req->pages[i], offset, count,
+				     zeroing);
 		if (err)
 			return err;
 
@@ -705,11 +914,10 @@ __acquires(&fc->lock)
  *
  * Called with fc->lock held, releases it
  */
-static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
-			       const struct iovec *iov, unsigned long nr_segs)
+static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
+			       size_t nbytes, struct fuse_req *req)
 __releases(&fc->lock)
 {
-	struct fuse_copy_state cs;
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
 	unsigned reqsize = sizeof(ih) + sizeof(arg);
@@ -725,14 +933,13 @@ __releases(&fc->lock)
 	arg.unique = req->in.h.unique;
 
 	spin_unlock(&fc->lock);
-	if (iov_length(iov, nr_segs) < reqsize)
+	if (nbytes < reqsize)
 		return -EINVAL;
 
-	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
-	err = fuse_copy_one(&cs, &ih, sizeof(ih));
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
 	if (!err)
-		err = fuse_copy_one(&cs, &arg, sizeof(arg));
-	fuse_copy_finish(&cs);
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+	fuse_copy_finish(cs);
 
 	return err ? err : reqsize;
 }
@@ -746,18 +953,13 @@ __releases(&fc->lock)
  * request_end(). Otherwise add it to the processing list, and set
  * the 'sent' flag.
  */
-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
-			     unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
+				struct fuse_copy_state *cs, size_t nbytes)
 {
 	int err;
 	struct fuse_req *req;
 	struct fuse_in *in;
-	struct fuse_copy_state cs;
 	unsigned reqsize;
-	struct file *file = iocb->ki_filp;
-	struct fuse_conn *fc = fuse_get_conn(file);
-	if (!fc)
-		return -EPERM;
 
  restart:
 	spin_lock(&fc->lock);
@@ -777,7 +979,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	if (!list_empty(&fc->interrupts)) {
 		req = list_entry(fc->interrupts.next, struct fuse_req,
 				 intr_entry);
-		return fuse_read_interrupt(fc, req, iov, nr_segs);
+		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
 	req = list_entry(fc->pending.next, struct fuse_req, list);
@@ -787,7 +989,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	in = &req->in;
 	reqsize = in->h.len;
 	/* If request is too large, reply with an error and restart the read */
-	if (iov_length(iov, nr_segs) < reqsize) {
+	if (nbytes < reqsize) {
 		req->out.h.error = -EIO;
 		/* SETXATTR is special, since it may contain too large data */
 		if (in->h.opcode == FUSE_SETXATTR)
@@ -796,12 +998,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 		goto restart;
 	}
 	spin_unlock(&fc->lock);
-	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
-	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
+	cs->req = req;
+	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
 	if (!err)
-		err = fuse_copy_args(&cs, in->numargs, in->argpages,
+		err = fuse_copy_args(cs, in->numargs, in->argpages,
 				     (struct fuse_arg *) in->args, 0);
-	fuse_copy_finish(&cs);
+	fuse_copy_finish(cs);
 	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (req->aborted) {
@@ -829,6 +1031,110 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	return err;
 }
 
+static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
+			     unsigned long nr_segs, loff_t pos)
+{
+	struct fuse_copy_state cs;
+	struct file *file = iocb->ki_filp;
+	struct fuse_conn *fc = fuse_get_conn(file);
+	if (!fc)
+		return -EPERM;
+
+	fuse_copy_init(&cs, fc, 1, iov, nr_segs);
+
+	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+}
+
+static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
+				   struct pipe_buffer *buf)
+{
+	return 1;
+}
+
+static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
+	.can_merge = 0,
+	.map = generic_pipe_buf_map,
+	.unmap = generic_pipe_buf_unmap,
+	.confirm = generic_pipe_buf_confirm,
+	.release = generic_pipe_buf_release,
+	.steal = fuse_dev_pipe_buf_steal,
+	.get = generic_pipe_buf_get,
+};
+
+static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+				    struct pipe_inode_info *pipe,
+				    size_t len, unsigned int flags)
+{
+	int ret;
+	int page_nr = 0;
+	int do_wakeup = 0;
+	struct pipe_buffer *bufs;
+	struct fuse_copy_state cs;
+	struct fuse_conn *fc = fuse_get_conn(in);
+	if (!fc)
+		return -EPERM;
+
+	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	if (!bufs)
+		return -ENOMEM;
+
+	fuse_copy_init(&cs, fc, 1, NULL, 0);
+	cs.pipebufs = bufs;
+	cs.pipe = pipe;
+	ret = fuse_dev_do_read(fc, in, &cs, len);
+	if (ret < 0)
+		goto out;
+
+	ret = 0;
+	pipe_lock(pipe);
+
+	if (!pipe->readers) {
+		send_sig(SIGPIPE, current, 0);
+		if (!ret)
+			ret = -EPIPE;
+		goto out_unlock;
+	}
+
+	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	while (page_nr < cs.nr_segs) {
+		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+		struct pipe_buffer *buf = pipe->bufs + newbuf;
+
+		buf->page = bufs[page_nr].page;
+		buf->offset = bufs[page_nr].offset;
+		buf->len = bufs[page_nr].len;
+		buf->ops = &fuse_dev_pipe_buf_ops;
+
+		pipe->nrbufs++;
+		page_nr++;
+		ret += buf->len;
+
+		if (pipe->inode)
+			do_wakeup = 1;
+	}
+
+out_unlock:
+	pipe_unlock(pipe);
+
+	if (do_wakeup) {
+		smp_mb();
+		if (waitqueue_active(&pipe->wait))
+			wake_up_interruptible(&pipe->wait);
+		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+	}
+
+out:
+	for (; page_nr < cs.nr_segs; page_nr++)
+		page_cache_release(bufs[page_nr].page);
+
+	kfree(bufs);
+	return ret;
+}
+
 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
 			    struct fuse_copy_state *cs)
 {
@@ -988,23 +1294,17 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
  * it from the list and copy the rest of the buffer to the request.
  * The request is finished by calling request_end()
  */
-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
+				 struct fuse_copy_state *cs, size_t nbytes)
 {
 	int err;
-	size_t nbytes = iov_length(iov, nr_segs);
 	struct fuse_req *req;
 	struct fuse_out_header oh;
-	struct fuse_copy_state cs;
-	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
-	if (!fc)
-		return -EPERM;
 
-	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
 	if (nbytes < sizeof(struct fuse_out_header))
 		return -EINVAL;
 
-	err = fuse_copy_one(&cs, &oh, sizeof(oh));
+	err = fuse_copy_one(cs, &oh, sizeof(oh));
 	if (err)
 		goto err_finish;
 
@@ -1017,7 +1317,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 	 * and error contains notification code.
 	 */
 	if (!oh.unique) {
-		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
+		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
 		return err ? err : nbytes;
 	}
 
@@ -1036,7 +1336,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 
 	if (req->aborted) {
 		spin_unlock(&fc->lock);
-		fuse_copy_finish(&cs);
+		fuse_copy_finish(cs);
 		spin_lock(&fc->lock);
 		request_end(fc, req);
 		return -ENOENT;
@@ -1053,7 +1353,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 			queue_interrupt(fc, req);
 
 		spin_unlock(&fc->lock);
-		fuse_copy_finish(&cs);
+		fuse_copy_finish(cs);
 		return nbytes;
 	}
 
@@ -1061,11 +1361,13 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 	list_move(&req->list, &fc->io);
 	req->out.h = oh;
 	req->locked = 1;
-	cs.req = req;
+	cs->req = req;
+	if (!req->out.page_replace)
+		cs->move_pages = 0;
 	spin_unlock(&fc->lock);
 
-	err = copy_out_args(&cs, &req->out, nbytes);
-	fuse_copy_finish(&cs);
+	err = copy_out_args(cs, &req->out, nbytes);
+	fuse_copy_finish(cs);
 
 	spin_lock(&fc->lock);
 	req->locked = 0;
@@ -1081,10 +1383,101 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 err_unlock:
 	spin_unlock(&fc->lock);
 err_finish:
-	fuse_copy_finish(&cs);
+	fuse_copy_finish(cs);
 	return err;
 }
 
+static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
+			      unsigned long nr_segs, loff_t pos)
+{
+	struct fuse_copy_state cs;
+	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
+	if (!fc)
+		return -EPERM;
+
+	fuse_copy_init(&cs, fc, 0, iov, nr_segs);
+
+	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
+}
+
+static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
+				     struct file *out, loff_t *ppos,
+				     size_t len, unsigned int flags)
+{
+	unsigned nbuf;
+	unsigned idx;
+	struct pipe_buffer *bufs;
+	struct fuse_copy_state cs;
+	struct fuse_conn *fc;
+	size_t rem;
+	ssize_t ret;
+
+	fc = fuse_get_conn(out);
+	if (!fc)
+		return -EPERM;
+
+	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	if (!bufs)
+		return -ENOMEM;
+
+	pipe_lock(pipe);
+	nbuf = 0;
+	rem = 0;
+	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
+		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+
+	ret = -EINVAL;
+	if (rem < len) {
+		pipe_unlock(pipe);
+		goto out;
+	}
+
+	rem = len;
+	while (rem) {
+		struct pipe_buffer *ibuf;
+		struct pipe_buffer *obuf;
+
+		BUG_ON(nbuf >= pipe->buffers);
+		BUG_ON(!pipe->nrbufs);
+		ibuf = &pipe->bufs[pipe->curbuf];
+		obuf = &bufs[nbuf];
+
+		if (rem >= ibuf->len) {
+			*obuf = *ibuf;
+			ibuf->ops = NULL;
+			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+			pipe->nrbufs--;
+		} else {
+			ibuf->ops->get(pipe, ibuf);
+			*obuf = *ibuf;
+			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+			obuf->len = rem;
+			ibuf->offset += obuf->len;
+			ibuf->len -= obuf->len;
+		}
+		nbuf++;
+		rem -= obuf->len;
+	}
+	pipe_unlock(pipe);
+
+	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
+	cs.pipebufs = bufs;
+	cs.pipe = pipe;
+
+	if (flags & SPLICE_F_MOVE)
+		cs.move_pages = 1;
+
+	ret = fuse_dev_do_write(fc, &cs, len);
+
+	for (idx = 0; idx < nbuf; idx++) {
+		struct pipe_buffer *buf = &bufs[idx];
+		buf->ops->release(pipe, buf);
+	}
+out:
+	kfree(bufs);
+	return ret;
+}
+
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
 	unsigned mask = POLLOUT | POLLWRNORM;
@@ -1226,8 +1619,10 @@ const struct file_operations fuse_dev_operations = {
 	.llseek		= no_llseek,
 	.read		= do_sync_read,
 	.aio_read	= fuse_dev_read,
+	.splice_read	= fuse_dev_splice_read,
 	.write		= do_sync_write,
 	.aio_write	= fuse_dev_write,
+	.splice_write	= fuse_dev_splice_write,
 	.poll		= fuse_dev_poll,
 	.release	= fuse_dev_release,
 	.fasync		= fuse_dev_fasync,
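
With splice_read and splice_write wired into fuse_dev_operations, a userspace FUSE server can move whole requests and replies through a pipe instead of copying them via readv/writev; passing SPLICE_F_MOVE on the write side maps onto cs.move_pages and the req->out.page_replace path above. The following is only a minimal, hypothetical userspace sketch of the read side: fuse_fd is assumed to be an already-mounted /dev/fuse descriptor, and header parsing and the reply pipe are elided.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Sketch: pull one raw FUSE request out of /dev/fuse into a pipe.
 * The kernel's fuse_dev_splice_read() installs the request pages as
 * pipe buffers, so no copy into a user buffer is made here. */
static int splice_one_request(int fuse_fd, size_t bufsize)
{
	int req_pipe[2];
	ssize_t n;

	if (pipe(req_pipe) == -1)
		return -1;

	n = splice(fuse_fd, NULL, req_pipe[1], NULL, bufsize, 0);
	if (n > 0) {
		/* A real server would now read the fuse_in_header from
		 * req_pipe[0], build a reply in another pipe, and splice
		 * it back into fuse_fd, e.g. with SPLICE_F_MOVE so the
		 * kernel may steal the pages. */
	}

	close(req_pipe[0]);
	close(req_pipe[1]);
	return n > 0 ? 0 : -1;
}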