| author    | Nick Piggin <npiggin@suse.de>                    | 2008-04-30 03:54:42 -0400 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org>   | 2008-04-30 11:29:50 -0400 |
| commit    | ea9b9907b82a09bd1a708004454f7065de77c5b0 (patch)  |                           |
| tree      | 5a8a4f1e41efac6a2c1292adc3aa4a01f2ffd62f /fs      |                           |
| parent    | 854512ec358f291bcadd7daea10d6bf3704933de (diff)   |                           |
fuse: implement perform_write
Introduce fuse_perform_write. With fusexmp (a passthrough filesystem), large
(1MB) writes into a backing tmpfs filesystem are sped up by almost 4 times
(256MB/s vs 71MB/s).
[mszeredi@suse.cz]:
- split into smaller functions
- testing
- duplicate generic_file_aio_write(), so that there's no need to add a
  new ->perform_write() a_op. Comment from hch.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
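
For context, the numbers in the message above come from timing large buffered writes through a fusexmp (passthrough) mount backed by tmpfs. Below is a minimal user-space sketch of that kind of workload; the mount point, file name, and total size are illustrative assumptions, not taken from the commit.

    /*
     * Illustrative workload sketch: 1 MB buffered write()s into a FUSE mount,
     * the pattern fuse_perform_write() is meant to speed up. The path below
     * is a hypothetical fusexmp mount point, not something from this commit.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const size_t chunk = 1 << 20;           /* 1 MB per write(), as in the commit message */
            const size_t total = 256UL << 20;       /* 256 MB written in total */
            char *buf = malloc(chunk);
            size_t done;
            int fd;

            if (!buf)
                    return 1;
            memset(buf, 'x', chunk);

            /* hypothetical mount point of a fusexmp passthrough filesystem */
            fd = open("/mnt/fusexmp/testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            for (done = 0; done < total; done += chunk) {
                    if (write(fd, buf, chunk) != (ssize_t)chunk) {
                            perror("write");
                            return 1;
                    }
            }

            close(fd);
            free(buf);
            return 0;
    }

Timing a run like this (for example with time(1)) against the same mount before and after the patch is the sort of comparison behind the 71 MB/s vs 256 MB/s figures: with the patch, each 1 MB write is batched into a few large FUSE WRITE requests instead of being sent one page at a time.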
Diffstat (limited to 'fs')

-rw-r--r--  fs/fuse/file.c  194
1 file changed, 193 insertions(+), 1 deletion(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f0f0f278b4ea..c5b5982bf386 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -677,6 +677,198 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
 	return res;
 }
 
+static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
+				    struct inode *inode, loff_t pos,
+				    size_t count)
+{
+	size_t res;
+	unsigned offset;
+	unsigned i;
+
+	for (i = 0; i < req->num_pages; i++)
+		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+
+	res = fuse_send_write(req, file, inode, pos, count, NULL);
+
+	offset = req->page_offset;
+	count = res;
+	for (i = 0; i < req->num_pages; i++) {
+		struct page *page = req->pages[i];
+
+		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
+			SetPageUptodate(page);
+
+		if (count > PAGE_CACHE_SIZE - offset)
+			count -= PAGE_CACHE_SIZE - offset;
+		else
+			count = 0;
+		offset = 0;
+
+		unlock_page(page);
+		page_cache_release(page);
+	}
+
+	return res;
+}
+
+static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+				     struct address_space *mapping,
+				     struct iov_iter *ii, loff_t pos)
+{
+	struct fuse_conn *fc = get_fuse_conn(mapping->host);
+	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+	size_t count = 0;
+	int err;
+
+	req->page_offset = offset;
+
+	do {
+		size_t tmp;
+		struct page *page;
+		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
+				     iov_iter_count(ii));
+
+		bytes = min_t(size_t, bytes, fc->max_write - count);
+
+ again:
+		err = -EFAULT;
+		if (iov_iter_fault_in_readable(ii, bytes))
+			break;
+
+		err = -ENOMEM;
+		page = __grab_cache_page(mapping, index);
+		if (!page)
+			break;
+
+		pagefault_disable();
+		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+		pagefault_enable();
+		flush_dcache_page(page);
+
+		if (!tmp) {
+			unlock_page(page);
+			page_cache_release(page);
+			bytes = min(bytes, iov_iter_single_seg_count(ii));
+			goto again;
+		}
+
+		err = 0;
+		req->pages[req->num_pages] = page;
+		req->num_pages++;
+
+		iov_iter_advance(ii, tmp);
+		count += tmp;
+		pos += tmp;
+		offset += tmp;
+		if (offset == PAGE_CACHE_SIZE)
+			offset = 0;
+
+	} while (iov_iter_count(ii) && count < fc->max_write &&
+		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);
+
+	return count > 0 ? count : err;
+}
+
+static ssize_t fuse_perform_write(struct file *file,
+				  struct address_space *mapping,
+				  struct iov_iter *ii, loff_t pos)
+{
+	struct inode *inode = mapping->host;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	int err = 0;
+	ssize_t res = 0;
+
+	if (is_bad_inode(inode))
+		return -EIO;
+
+	do {
+		struct fuse_req *req;
+		ssize_t count;
+
+		req = fuse_get_req(fc);
+		if (IS_ERR(req)) {
+			err = PTR_ERR(req);
+			break;
+		}
+
+		count = fuse_fill_write_pages(req, mapping, ii, pos);
+		if (count <= 0) {
+			err = count;
+		} else {
+			size_t num_written;
+
+			num_written = fuse_send_write_pages(req, file, inode,
+							    pos, count);
+			err = req->out.h.error;
+			if (!err) {
+				res += num_written;
+				pos += num_written;
+
+				/* break out of the loop on short write */
+				if (num_written != count)
+					err = -EIO;
+			}
+		}
+		fuse_put_request(fc, req);
+	} while (!err && iov_iter_count(ii));
+
+	if (res > 0)
+		fuse_write_update_size(inode, pos);
+
+	fuse_invalidate_attr(inode);
+
+	return res > 0 ? res : err;
+}
+
+static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+				   unsigned long nr_segs, loff_t pos)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	size_t count = 0;
+	ssize_t written = 0;
+	struct inode *inode = mapping->host;
+	ssize_t err;
+	struct iov_iter i;
+
+	WARN_ON(iocb->ki_pos != pos);
+
+	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+	if (err)
+		return err;
+
+	mutex_lock(&inode->i_mutex);
+	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+	if (err)
+		goto out;
+
+	if (count == 0)
+		goto out;
+
+	err = remove_suid(file->f_path.dentry);
+	if (err)
+		goto out;
+
+	file_update_time(file);
+
+	iov_iter_init(&i, iov, nr_segs, count, 0);
+	written = fuse_perform_write(file, mapping, &i, pos);
+	if (written >= 0)
+		iocb->ki_pos = pos + written;
+
+out:
+	current->backing_dev_info = NULL;
+	mutex_unlock(&inode->i_mutex);
+
+	return written ? written : err;
+}
+
 static void fuse_release_user_pages(struct fuse_req *req, int write)
 {
 	unsigned i;
@@ -1203,7 +1395,7 @@ static const struct file_operations fuse_file_operations = {
 	.read		= do_sync_read,
 	.aio_read	= fuse_file_aio_read,
 	.write		= do_sync_write,
-	.aio_write	= generic_file_aio_write,
+	.aio_write	= fuse_file_aio_write,
 	.mmap		= fuse_file_mmap,
 	.open		= fuse_open,
 	.flush		= fuse_flush,