about summary refs log tree commit diff stats
path: root/mm/filemap.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2007-10-16 04:24:53 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 12:42:54 -0400
commit41cb8ac025dbbf6782eae10d231e7e2336ad3724 (patch)
tree0070e26ee574338a4a3f56f574e1c126a4357c8e /mm/filemap.c
parent902aaed0d983dfd459fcb2b678608d4584782200 (diff)
mm: revert KERNEL_DS buffered write optimisation
Revert the patch from Neil Brown to optimise NFSD writev handling.

Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--mm/filemap.c32
1 files changed, 13 insertions, 19 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 7989c44cb293..c504db18ac26 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1874,27 +1874,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		/* Limit the size of the copy to the caller's write size */
 		bytes = min(bytes, count);
 
-		/* We only need to worry about prefaulting when writes are from
-		 * user-space.  NFSd uses vfs_writev with several non-aligned
-		 * segments in the vector, and limiting to one segment a time is
-		 * a noticeable performance for re-write
-		 */
-		if (!segment_eq(get_fs(), KERNEL_DS)) {
-			/*
-			 * Limit the size of the copy to that of the current
-			 * segment, because fault_in_pages_readable() doesn't
-			 * know how to walk segments.
-			 */
-			bytes = min(bytes, cur_iov->iov_len - iov_base);
+		/*
+		 * Limit the size of the copy to that of the current segment,
+		 * because fault_in_pages_readable() doesn't know how to walk
+		 * segments.
+		 */
+		bytes = min(bytes, cur_iov->iov_len - iov_base);
 
-			/*
-			 * Bring in the user page that we will copy from
-			 * _first_.  Otherwise there's a nasty deadlock on
-			 * copying from the same page as we're writing to,
-			 * without it being marked up-to-date.
-			 */
-			fault_in_pages_readable(buf, bytes);
-		}
+		/*
+		 * Bring in the user page that we will copy from _first_.
+		 * Otherwise there's a nasty deadlock on copying from the
+		 * same page as we're writing to, without it being marked
+		 * up-to-date.
+		 */
+		fault_in_pages_readable(buf, bytes);
+
 		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
 		if (!page) {
 			status = -ENOMEM;