author:    NeilBrown <neilb@suse.de>  2007-02-16 04:28:38 -0500
committer: Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-16 11:14:01 -0500
commit:    29dbb3fc8020f025bc38b262ec494e19fd3eac02
tree:      579877f8d80e04e0908253b782b8e58c742b3fe6 /mm
parent:    3160a711ef754758e7f85ae371cf900252c1a392
[PATCH] knfsd: stop NFSD writes from being broken into lots of little writes to filesystem
When NFSD receives a write request, the data typically arrives as a number of
1448-byte segments, and writev is used to collect them together.
Unfortunately, generic_file_buffered_write passes these to the filesystem
one at a time, so, for example, a 32K over-write becomes a series of partial-page
writes to each page, forcing the filesystem to pre-read those pages
- wasted effort.
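As a rough illustration of that cost, the following self-contained sketch (ordinary userspace C, not kernel code; the 1448-byte segment size and 32K write come from the text above, and a 4096-byte page size is assumed) maps each segment onto page-cache pages and counts how many separate partial writes every page would receive if the segments were written one at a time:

```c
#include <stdio.h>

int main(void)
{
	const long page_size = 4096;		/* assumed PAGE_SIZE */
	const long seg_size  = 1448;		/* typical NFS write segment */
	const long total     = 32 * 1024;	/* the 32K over-write from the text */
	long writes_per_page[32 * 1024 / 4096] = { 0 };

	for (long off = 0; off < total; off += seg_size) {
		long len   = (total - off < seg_size) ? total - off : seg_size;
		long first = off / page_size;
		long last  = (off + len - 1) / page_size;

		/* each segment becomes its own write, touching one or two pages */
		for (long p = first; p <= last; p++)
			writes_per_page[p]++;
	}

	for (long p = 0; p < total / page_size; p++)
		printf("page %ld: %ld partial writes\n", p, writes_per_page[p]);
	return 0;
}
```

No single segment covers a whole page, so every page in the range is written in two or three pieces, and a page that is not already up to date has to be read in before the first partial write lands on it.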
generic_file_buffered_write handles one segment of the vector at a time
because it has to pre-fault in each segment to avoid deadlocks. When writing from
kernel-space (as nfsd does) this is not an issue, so
generic_file_buffered_write does not need to break an iovec from nfsd into
little pieces.
This patch avoids the splitting when get_fs() is KERNEL_DS, as it is when the
write comes from NFSd.
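For context, here is a minimal sketch of the address-limit pattern that kernel-space callers such as nfsd rely on; the helper name kernel_writev is invented for illustration and this is not the actual nfsd code. With the limit set to KERNEL_DS, the check added by this patch lets generic_file_buffered_write copy the whole vector without per-segment prefaulting.

```c
#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: write kernel-memory iovecs through vfs_writev()
 * by temporarily raising the address limit to KERNEL_DS, the state
 * that generic_file_buffered_write() now tests with segment_eq().
 */
static ssize_t kernel_writev(struct file *file, const struct iovec *vec,
			     unsigned long vlen, loff_t *pos)
{
	mm_segment_t oldfs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);
	ret = vfs_writev(file, (const struct iovec __user *)vec, vlen, pos);
	set_fs(oldfs);
	return ret;
}
```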
This issue was introduced by commit 6527c2bdf1f833cc18e8f42bd97973d583e4aa83
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Norman Weathers <norman.r.weathers@conocophillips.com>
Cc: Vladimir V. Saveliev <vs@namesys.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
 mm/filemap.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 00414849a867..d1060b8d3cd6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2079,21 +2079,27 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		/* Limit the size of the copy to the caller's write size */
 		bytes = min(bytes, count);
 
-		/*
-		 * Limit the size of the copy to that of the current segment,
-		 * because fault_in_pages_readable() doesn't know how to walk
-		 * segments.
+		/* We only need to worry about prefaulting when writes are from
+		 * user-space.  NFSd uses vfs_writev with several non-aligned
+		 * segments in the vector, and limiting to one segment a time is
+		 * a noticeable performance for re-write
 		 */
-		bytes = min(bytes, cur_iov->iov_len - iov_base);
-
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 */
-		fault_in_pages_readable(buf, bytes);
+		if (!segment_eq(get_fs(), KERNEL_DS)) {
+			/*
+			 * Limit the size of the copy to that of the current
+			 * segment, because fault_in_pages_readable() doesn't
+			 * know how to walk segments.
+			 */
+			bytes = min(bytes, cur_iov->iov_len - iov_base);
 
+			/*
+			 * Bring in the user page that we will copy from
+			 * _first_.  Otherwise there's a nasty deadlock on
+			 * copying from the same page as we're writing to,
+			 * without it being marked up-to-date.
+			 */
+			fault_in_pages_readable(buf, bytes);
+		}
 		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
 		if (!page) {
 			status = -ENOMEM;