Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--  fs/fuse/dev.c  528
1 files changed, 462 insertions, 66 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index eb7e9423691f..9424796d6634 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -16,8 +16,12 @@
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/swap.h>
+#include <linux/splice.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
+MODULE_ALIAS("devname:fuse");
 
 static struct kmem_cache *fuse_req_cachep;
 
@@ -498,6 +502,9 @@ struct fuse_copy_state {
 	int write;
 	struct fuse_req *req;
 	const struct iovec *iov;
+	struct pipe_buffer *pipebufs;
+	struct pipe_buffer *currbuf;
+	struct pipe_inode_info *pipe;
 	unsigned long nr_segs;
 	unsigned long seglen;
 	unsigned long addr;
@@ -505,16 +512,16 @@ struct fuse_copy_state {
 	void *mapaddr;
 	void *buf;
 	unsigned len;
+	unsigned move_pages:1;
 };
 
 static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
-			   int write, struct fuse_req *req,
+			   int write,
 			   const struct iovec *iov, unsigned long nr_segs)
 {
 	memset(cs, 0, sizeof(*cs));
 	cs->fc = fc;
 	cs->write = write;
-	cs->req = req;
 	cs->iov = iov;
 	cs->nr_segs = nr_segs;
 }
@@ -522,7 +529,18 @@ static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
 /* Unmap and put previous page of userspace buffer */
 static void fuse_copy_finish(struct fuse_copy_state *cs)
 {
-	if (cs->mapaddr) {
+	if (cs->currbuf) {
+		struct pipe_buffer *buf = cs->currbuf;
+
+		if (!cs->write) {
+			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+		} else {
+			kunmap_atomic(cs->mapaddr, KM_USER0);
+			buf->len = PAGE_SIZE - cs->len;
+		}
+		cs->currbuf = NULL;
+		cs->mapaddr = NULL;
+	} else if (cs->mapaddr) {
 		kunmap_atomic(cs->mapaddr, KM_USER0);
 		if (cs->write) {
 			flush_dcache_page(cs->pg);
@@ -544,26 +562,61 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
 	unlock_request(cs->fc, cs->req);
 	fuse_copy_finish(cs);
-	if (!cs->seglen) {
-		BUG_ON(!cs->nr_segs);
-		cs->seglen = cs->iov[0].iov_len;
-		cs->addr = (unsigned long) cs->iov[0].iov_base;
-		cs->iov++;
-		cs->nr_segs--;
+	if (cs->pipebufs) {
+		struct pipe_buffer *buf = cs->pipebufs;
+
+		if (!cs->write) {
+			err = buf->ops->confirm(cs->pipe, buf);
+			if (err)
+				return err;
+
+			BUG_ON(!cs->nr_segs);
+			cs->currbuf = buf;
+			cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+			cs->len = buf->len;
+			cs->buf = cs->mapaddr + buf->offset;
+			cs->pipebufs++;
+			cs->nr_segs--;
+		} else {
+			struct page *page;
+
+			if (cs->nr_segs == cs->pipe->buffers)
+				return -EIO;
+
+			page = alloc_page(GFP_HIGHUSER);
+			if (!page)
+				return -ENOMEM;
+
+			buf->page = page;
+			buf->offset = 0;
+			buf->len = 0;
+
+			cs->currbuf = buf;
+			cs->mapaddr = kmap_atomic(page, KM_USER0);
+			cs->buf = cs->mapaddr;
+			cs->len = PAGE_SIZE;
+			cs->pipebufs++;
+			cs->nr_segs++;
+		}
+	} else {
+		if (!cs->seglen) {
+			BUG_ON(!cs->nr_segs);
+			cs->seglen = cs->iov[0].iov_len;
+			cs->addr = (unsigned long) cs->iov[0].iov_base;
+			cs->iov++;
+			cs->nr_segs--;
+		}
+		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+		if (err < 0)
+			return err;
+		BUG_ON(err != 1);
+		offset = cs->addr % PAGE_SIZE;
+		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
+		cs->buf = cs->mapaddr + offset;
+		cs->len = min(PAGE_SIZE - offset, cs->seglen);
+		cs->seglen -= cs->len;
+		cs->addr += cs->len;
 	}
-	down_read(&current->mm->mmap_sem);
-	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
-			     &cs->pg, NULL);
-	up_read(&current->mm->mmap_sem);
-	if (err < 0)
-		return err;
-	BUG_ON(err != 1);
-	offset = cs->addr % PAGE_SIZE;
-	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
-	cs->buf = cs->mapaddr + offset;
-	cs->len = min(PAGE_SIZE - offset, cs->seglen);
-	cs->seglen -= cs->len;
-	cs->addr += cs->len;
 
 	return lock_request(cs->fc, cs->req);
 }
@@ -585,23 +638,178 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 	return ncpy;
 }
 
+static int fuse_check_page(struct page *page)
+{
+	if (page_mapcount(page) ||
+	    page->mapping != NULL ||
+	    page_count(page) != 1 ||
+	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
+	     ~(1 << PG_locked |
+	       1 << PG_referenced |
+	       1 << PG_uptodate |
+	       1 << PG_lru |
+	       1 << PG_active |
+	       1 << PG_reclaim))) {
+		printk(KERN_WARNING "fuse: trying to steal weird page\n");
+		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
+		return 1;
+	}
+	return 0;
+}
+
+static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+{
+	int err;
+	struct page *oldpage = *pagep;
+	struct page *newpage;
+	struct pipe_buffer *buf = cs->pipebufs;
+	struct address_space *mapping;
+	pgoff_t index;
+
+	unlock_request(cs->fc, cs->req);
+	fuse_copy_finish(cs);
+
+	err = buf->ops->confirm(cs->pipe, buf);
+	if (err)
+		return err;
+
+	BUG_ON(!cs->nr_segs);
+	cs->currbuf = buf;
+	cs->len = buf->len;
+	cs->pipebufs++;
+	cs->nr_segs--;
+
+	if (cs->len != PAGE_SIZE)
+		goto out_fallback;
+
+	if (buf->ops->steal(cs->pipe, buf) != 0)
+		goto out_fallback;
+
+	newpage = buf->page;
+
+	if (WARN_ON(!PageUptodate(newpage)))
+		return -EIO;
+
+	ClearPageMappedToDisk(newpage);
+
+	if (fuse_check_page(newpage) != 0)
+		goto out_fallback_unlock;
+
+	mapping = oldpage->mapping;
+	index = oldpage->index;
+
+	/*
+	 * This is a new and locked page, it shouldn't be mapped or
+	 * have any special flags on it
+	 */
+	if (WARN_ON(page_mapped(oldpage)))
+		goto out_fallback_unlock;
+	if (WARN_ON(page_has_private(oldpage)))
+		goto out_fallback_unlock;
+	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
+		goto out_fallback_unlock;
+	if (WARN_ON(PageMlocked(oldpage)))
+		goto out_fallback_unlock;
+
+	remove_from_page_cache(oldpage);
+	page_cache_release(oldpage);
+
+	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
+	if (err) {
+		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
+		goto out_fallback_unlock;
+	}
+	page_cache_get(newpage);
+
+	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+		lru_cache_add_file(newpage);
+
+	err = 0;
+	spin_lock(&cs->fc->lock);
+	if (cs->req->aborted)
+		err = -ENOENT;
+	else
+		*pagep = newpage;
+	spin_unlock(&cs->fc->lock);
+
+	if (err) {
+		unlock_page(newpage);
+		page_cache_release(newpage);
+		return err;
+	}
+
+	unlock_page(oldpage);
+	page_cache_release(oldpage);
+	cs->len = 0;
+
+	return 0;
+
+out_fallback_unlock:
+	unlock_page(newpage);
+out_fallback:
+	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+	cs->buf = cs->mapaddr + buf->offset;
+
+	err = lock_request(cs->fc, cs->req);
+	if (err)
+		return err;
+
+	return 1;
+}
+
+static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
+			 unsigned offset, unsigned count)
+{
+	struct pipe_buffer *buf;
+
+	if (cs->nr_segs == cs->pipe->buffers)
+		return -EIO;
+
+	unlock_request(cs->fc, cs->req);
+	fuse_copy_finish(cs);
+
+	buf = cs->pipebufs;
+	page_cache_get(page);
+	buf->page = page;
+	buf->offset = offset;
+	buf->len = count;
+
+	cs->pipebufs++;
+	cs->nr_segs++;
+	cs->len = 0;
+
+	return 0;
+}
+
 /*
  * Copy a page in the request to/from the userspace buffer. Must be
  * done atomically
  */
-static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
+static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 			  unsigned offset, unsigned count, int zeroing)
 {
+	int err;
+	struct page *page = *pagep;
+
 	if (page && zeroing && count < PAGE_SIZE) {
 		void *mapaddr = kmap_atomic(page, KM_USER1);
 		memset(mapaddr, 0, PAGE_SIZE);
 		kunmap_atomic(mapaddr, KM_USER1);
 	}
 	while (count) {
-		if (!cs->len) {
-			int err = fuse_copy_fill(cs);
-			if (err)
-				return err;
+		if (cs->write && cs->pipebufs && page) {
+			return fuse_ref_page(cs, page, offset, count);
+		} else if (!cs->len) {
+			if (cs->move_pages && page &&
+			    offset == 0 && count == PAGE_SIZE) {
+				err = fuse_try_move_page(cs, pagep);
+				if (err <= 0)
+					return err;
+			} else {
+				err = fuse_copy_fill(cs);
+				if (err)
+					return err;
+			}
 		}
 		if (page) {
 			void *mapaddr = kmap_atomic(page, KM_USER1);
@@ -626,8 +834,10 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
 		unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
 
 	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
-		struct page *page = req->pages[i];
-		int err = fuse_copy_page(cs, page, offset, count, zeroing);
+		int err;
+
+		err = fuse_copy_page(cs, &req->pages[i], offset, count,
+				     zeroing);
 		if (err)
 			return err;
 
@@ -704,11 +914,10 @@ __acquires(&fc->lock)
  *
  * Called with fc->lock held, releases it
  */
-static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
-			       const struct iovec *iov, unsigned long nr_segs)
+static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
+			       size_t nbytes, struct fuse_req *req)
 __releases(&fc->lock)
 {
-	struct fuse_copy_state cs;
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
 	unsigned reqsize = sizeof(ih) + sizeof(arg);
@@ -724,14 +933,13 @@ __releases(&fc->lock)
 	arg.unique = req->in.h.unique;
 
 	spin_unlock(&fc->lock);
-	if (iov_length(iov, nr_segs) < reqsize)
+	if (nbytes < reqsize)
 		return -EINVAL;
 
-	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
-	err = fuse_copy_one(&cs, &ih, sizeof(ih));
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
 	if (!err)
-		err = fuse_copy_one(&cs, &arg, sizeof(arg));
-	fuse_copy_finish(&cs);
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+	fuse_copy_finish(cs);
 
 	return err ? err : reqsize;
 }
@@ -745,18 +953,13 @@ __releases(&fc->lock)
  * request_end(). Otherwise add it to the processing list, and set
  * the 'sent' flag.
  */
-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
-			     unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
+				struct fuse_copy_state *cs, size_t nbytes)
 {
 	int err;
 	struct fuse_req *req;
 	struct fuse_in *in;
-	struct fuse_copy_state cs;
 	unsigned reqsize;
-	struct file *file = iocb->ki_filp;
-	struct fuse_conn *fc = fuse_get_conn(file);
-	if (!fc)
-		return -EPERM;
 
  restart:
 	spin_lock(&fc->lock);
@@ -776,7 +979,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	if (!list_empty(&fc->interrupts)) {
 		req = list_entry(fc->interrupts.next, struct fuse_req,
 				 intr_entry);
-		return fuse_read_interrupt(fc, req, iov, nr_segs);
+		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
 	req = list_entry(fc->pending.next, struct fuse_req, list);
@@ -786,7 +989,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	in = &req->in;
 	reqsize = in->h.len;
 	/* If request is too large, reply with an error and restart the read */
-	if (iov_length(iov, nr_segs) < reqsize) {
+	if (nbytes < reqsize) {
 		req->out.h.error = -EIO;
 		/* SETXATTR is special, since it may contain too large data */
 		if (in->h.opcode == FUSE_SETXATTR)
@@ -795,12 +998,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 		goto restart;
 	}
 	spin_unlock(&fc->lock);
-	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
-	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
+	cs->req = req;
+	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
 	if (!err)
-		err = fuse_copy_args(&cs, in->numargs, in->argpages,
+		err = fuse_copy_args(cs, in->numargs, in->argpages,
 				     (struct fuse_arg *) in->args, 0);
-	fuse_copy_finish(&cs);
+	fuse_copy_finish(cs);
 	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (req->aborted) {
@@ -828,6 +1031,110 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 	return err;
 }
 
+static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
+			     unsigned long nr_segs, loff_t pos)
+{
+	struct fuse_copy_state cs;
+	struct file *file = iocb->ki_filp;
+	struct fuse_conn *fc = fuse_get_conn(file);
+	if (!fc)
+		return -EPERM;
+
+	fuse_copy_init(&cs, fc, 1, iov, nr_segs);
+
+	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+}
+
+static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
+				   struct pipe_buffer *buf)
+{
+	return 1;
+}
+
+static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
+	.can_merge = 0,
+	.map = generic_pipe_buf_map,
+	.unmap = generic_pipe_buf_unmap,
+	.confirm = generic_pipe_buf_confirm,
+	.release = generic_pipe_buf_release,
+	.steal = fuse_dev_pipe_buf_steal,
+	.get = generic_pipe_buf_get,
+};
+
+static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+				    struct pipe_inode_info *pipe,
+				    size_t len, unsigned int flags)
+{
+	int ret;
+	int page_nr = 0;
+	int do_wakeup = 0;
+	struct pipe_buffer *bufs;
+	struct fuse_copy_state cs;
+	struct fuse_conn *fc = fuse_get_conn(in);
+	if (!fc)
+		return -EPERM;
+
+	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	if (!bufs)
+		return -ENOMEM;
+
+	fuse_copy_init(&cs, fc, 1, NULL, 0);
+	cs.pipebufs = bufs;
+	cs.pipe = pipe;
+	ret = fuse_dev_do_read(fc, in, &cs, len);
+	if (ret < 0)
+		goto out;
+
+	ret = 0;
+	pipe_lock(pipe);
+
+	if (!pipe->readers) {
+		send_sig(SIGPIPE, current, 0);
+		if (!ret)
+			ret = -EPIPE;
+		goto out_unlock;
+	}
+
+	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	while (page_nr < cs.nr_segs) {
+		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+		struct pipe_buffer *buf = pipe->bufs + newbuf;
+
+		buf->page = bufs[page_nr].page;
+		buf->offset = bufs[page_nr].offset;
+		buf->len = bufs[page_nr].len;
+		buf->ops = &fuse_dev_pipe_buf_ops;
+
+		pipe->nrbufs++;
+		page_nr++;
+		ret += buf->len;
+
+		if (pipe->inode)
+			do_wakeup = 1;
+	}
+
+out_unlock:
+	pipe_unlock(pipe);
+
+	if (do_wakeup) {
+		smp_mb();
+		if (waitqueue_active(&pipe->wait))
+			wake_up_interruptible(&pipe->wait);
+		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+	}
+
+out:
+	for (; page_nr < cs.nr_segs; page_nr++)
+		page_cache_release(bufs[page_nr].page);
+
+	kfree(bufs);
+	return ret;
+}
+
 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
 			    struct fuse_copy_state *cs)
 {
@@ -987,23 +1294,17 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
  * it from the list and copy the rest of the buffer to the request.
  * The request is finished by calling request_end()
  */
-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
+				 struct fuse_copy_state *cs, size_t nbytes)
 {
 	int err;
-	size_t nbytes = iov_length(iov, nr_segs);
 	struct fuse_req *req;
 	struct fuse_out_header oh;
-	struct fuse_copy_state cs;
-	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
-	if (!fc)
-		return -EPERM;
 
-	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
 	if (nbytes < sizeof(struct fuse_out_header))
 		return -EINVAL;
 
-	err = fuse_copy_one(&cs, &oh, sizeof(oh));
+	err = fuse_copy_one(cs, &oh, sizeof(oh));
 	if (err)
 		goto err_finish;
 
@@ -1016,7 +1317,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 	 * and error contains notification code.
 	 */
 	if (!oh.unique) {
-		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
+		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
 		return err ? err : nbytes;
 	}
 
@@ -1035,7 +1336,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 
 	if (req->aborted) {
 		spin_unlock(&fc->lock);
-		fuse_copy_finish(&cs);
+		fuse_copy_finish(cs);
 		spin_lock(&fc->lock);
 		request_end(fc, req);
 		return -ENOENT;
@@ -1052,7 +1353,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 			queue_interrupt(fc, req);
 
 		spin_unlock(&fc->lock);
-		fuse_copy_finish(&cs);
+		fuse_copy_finish(cs);
 		return nbytes;
 	}
 
@@ -1060,11 +1361,13 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 	list_move(&req->list, &fc->io);
 	req->out.h = oh;
 	req->locked = 1;
-	cs.req = req;
+	cs->req = req;
+	if (!req->out.page_replace)
+		cs->move_pages = 0;
 	spin_unlock(&fc->lock);
 
-	err = copy_out_args(&cs, &req->out, nbytes);
-	fuse_copy_finish(&cs);
+	err = copy_out_args(cs, &req->out, nbytes);
+	fuse_copy_finish(cs);
 
 	spin_lock(&fc->lock);
 	req->locked = 0;
@@ -1080,10 +1383,101 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
  err_unlock:
 	spin_unlock(&fc->lock);
  err_finish:
-	fuse_copy_finish(&cs);
+	fuse_copy_finish(cs);
 	return err;
 }
 
+static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
+			      unsigned long nr_segs, loff_t pos)
+{
+	struct fuse_copy_state cs;
+	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
+	if (!fc)
+		return -EPERM;
+
+	fuse_copy_init(&cs, fc, 0, iov, nr_segs);
+
+	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
+}
+
+static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
+				     struct file *out, loff_t *ppos,
+				     size_t len, unsigned int flags)
+{
+	unsigned nbuf;
+	unsigned idx;
+	struct pipe_buffer *bufs;
+	struct fuse_copy_state cs;
+	struct fuse_conn *fc;
+	size_t rem;
+	ssize_t ret;
+
+	fc = fuse_get_conn(out);
+	if (!fc)
+		return -EPERM;
+
+	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	if (!bufs)
+		return -ENOMEM;
+
+	pipe_lock(pipe);
+	nbuf = 0;
+	rem = 0;
+	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
+		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+
+	ret = -EINVAL;
+	if (rem < len) {
+		pipe_unlock(pipe);
+		goto out;
+	}
+
+	rem = len;
+	while (rem) {
+		struct pipe_buffer *ibuf;
+		struct pipe_buffer *obuf;
+
+		BUG_ON(nbuf >= pipe->buffers);
+		BUG_ON(!pipe->nrbufs);
+		ibuf = &pipe->bufs[pipe->curbuf];
+		obuf = &bufs[nbuf];
+
+		if (rem >= ibuf->len) {
+			*obuf = *ibuf;
+			ibuf->ops = NULL;
+			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+			pipe->nrbufs--;
+		} else {
+			ibuf->ops->get(pipe, ibuf);
+			*obuf = *ibuf;
+			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+			obuf->len = rem;
+			ibuf->offset += obuf->len;
+			ibuf->len -= obuf->len;
+		}
+		nbuf++;
+		rem -= obuf->len;
+	}
+	pipe_unlock(pipe);
+
+	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
+	cs.pipebufs = bufs;
+	cs.pipe = pipe;
+
+	if (flags & SPLICE_F_MOVE)
+		cs.move_pages = 1;
+
+	ret = fuse_dev_do_write(fc, &cs, len);
+
+	for (idx = 0; idx < nbuf; idx++) {
+		struct pipe_buffer *buf = &bufs[idx];
+		buf->ops->release(pipe, buf);
+	}
+out:
+	kfree(bufs);
+	return ret;
+}
+
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
 	unsigned mask = POLLOUT | POLLWRNORM;
@@ -1225,8 +1619,10 @@ const struct file_operations fuse_dev_operations = {
 	.llseek		= no_llseek,
 	.read		= do_sync_read,
 	.aio_read	= fuse_dev_read,
+	.splice_read	= fuse_dev_splice_read,
 	.write		= do_sync_write,
 	.aio_write	= fuse_dev_write,
+	.splice_write	= fuse_dev_splice_write,
 	.poll		= fuse_dev_poll,
 	.release	= fuse_dev_release,
 	.fasync		= fuse_dev_fasync,
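
Note on the interface this patch exposes: the new splice_read/splice_write hooks let a userspace filesystem daemon move request and reply data through a pipe with splice(2) instead of read()/write() on /dev/fuse. The sketch below is illustrative only and is not part of the patch: fuse_fd, req_pipe, reply_pipe and splice_roundtrip are hypothetical names, fuse_fd is assumed to be an already mounted /dev/fuse descriptor, and a real daemon would parse the request and build a complete reply (fuse_out_header plus payload) between the two splice calls. SPLICE_F_MOVE on the write side corresponds to the cs.move_pages / fuse_try_move_page page-stealing path added above.

/* Hypothetical userspace sketch, not part of the kernel patch. */
#define _GNU_SOURCE
#include <fcntl.h>		/* splice(), SPLICE_F_MOVE (with _GNU_SOURCE) */
#include <stdio.h>
#include <unistd.h>

#define SPLICE_MAX (64 * 1024)

/* fuse_fd: assumed to be an already mounted /dev/fuse descriptor. */
void splice_roundtrip(int fuse_fd)
{
	int req_pipe[2], reply_pipe[2];
	ssize_t n;

	if (pipe(req_pipe) == -1 || pipe(reply_pipe) == -1) {
		perror("pipe");
		return;
	}

	/* Hits fuse_dev_splice_read(): request bytes land in pipe pages. */
	n = splice(fuse_fd, NULL, req_pipe[1], NULL, SPLICE_MAX, 0);
	if (n == -1) {
		perror("splice from /dev/fuse");
		return;
	}

	/*
	 * A real daemon would now read and parse the request from
	 * req_pipe[0] and fill reply_pipe[1] with the reply before this
	 * point.  The second splice hits fuse_dev_splice_write();
	 * SPLICE_F_MOVE asks the kernel to steal the pipe pages into the
	 * page cache instead of copying them.
	 */
	n = splice(reply_pipe[0], NULL, fuse_fd, NULL, SPLICE_MAX,
		   SPLICE_F_MOVE);
	if (n == -1)
		perror("splice to /dev/fuse");
}

Compared with the aio_read/aio_write path, which copies through a userspace buffer via fuse_copy_fill() and get_user_pages_fast(), the splice path references or steals whole pages, which is the point of the fuse_ref_page() and fuse_try_move_page() helpers in this diff.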