author      Vladimir V. Saveliev <vs@namesys.com>    2006-06-27 05:53:57 -0400
committer   Linus Torvalds <torvalds@g5.osdl.org>    2006-06-27 20:32:39 -0400
commit      6527c2bdf1f833cc18e8f42bd97973d583e4aa83 (patch)
tree        737055ae276cdfa75e7b3e55a3ebdd1f88105606 /mm/filemap.c
parent      1c0f16e5cdff59f3b132a1b0c0d44a941f8813d2 (diff)
[PATCH] generic_file_buffered_write(): deadlock on vectored write
generic_file_buffered_write() prefaults the user pages it will copy from, in
order to avoid deadlocking when the source of the copy is the same page the
write is going to.

However, there is a problem when the write is vectored:
fault_in_pages_readable() brings in only the current segment, or part of it
(maxlen).  filemap_copy_from_user_iovec(), on the other hand, is asked to copy
a number of bytes (bytes) which may exceed the current segment, so it switches
to the next segment, which has not been brought in yet, and a page fault is
generated.  That causes a deadlock if the fault is against the same page the
write is going to: the page being written is locked and not uptodate, so the
fault handler deadlocks trying to lock the already-locked page.
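For context, here is a hypothetical userspace sketch of the kind of vectored
write that can hit this; the file name, sizes, and exact layout are
assumptions, not taken from the patch.  One segment is an ordinary buffer, the
next points into a shared mapping of the very page the write lands in.

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <unistd.h>

/*
 * Hypothetical reproducer sketch -- illustrative only, not part of the
 * commit.  iov[1] points into a shared mapping of the file page that the
 * write below is directed at.
 */
int main(void)
{
	struct iovec iov[2];
	char small[16];
	char *map;
	int fd;

	fd = open("testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	memset(small, 'a', sizeof(small));
	iov[0].iov_base = small;		/* prefaulted by fault_in_pages_readable() */
	iov[0].iov_len  = sizeof(small);
	iov[1].iov_base = map;			/* same file page, not yet faulted in */
	iov[1].iov_len  = 4096 - sizeof(small);

	/*
	 * The write targets page 0 of the file.  Before the fix, the copy of
	 * a full page could cross from iov[0] into iov[1] while page 0 was
	 * locked and not uptodate; the resulting fault on iov[1] then waited
	 * on that same locked page.
	 */
	if (writev(fd, iov, 2) < 0)
		return 1;

	munmap(map, 4096);
	close(fd);
	return 0;
}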
[akpm@osdl.org: somewhat rewritten]
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 9c7334bafda8..d504d6e98886 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2095,14 +2095,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 	do {
 		unsigned long index;
 		unsigned long offset;
-		unsigned long maxlen;
 		size_t copied;
 
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
 		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
-			bytes = count;
+
+		/* Limit the size of the copy to the caller's write size */
+		bytes = min(bytes, count);
+
+		/*
+		 * Limit the size of the copy to that of the current segment,
+		 * because fault_in_pages_readable() doesn't know how to walk
+		 * segments.
+		 */
+		bytes = min(bytes, cur_iov->iov_len - iov_base);
 
 		/*
 		 * Bring in the user page that we will copy from _first_.
@@ -2110,10 +2117,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		 * same page as we're writing to, without it being marked
 		 * up-to-date.
 		 */
-		maxlen = cur_iov->iov_len - iov_base;
-		if (maxlen > bytes)
-			maxlen = bytes;
-		fault_in_pages_readable(buf, maxlen);
+		fault_in_pages_readable(buf, bytes);
 
 		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
 		if (!page) {
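To make the reasoning behind the new clamp concrete, here is a simplified,
hypothetical stand-in for a segment-walking copy such as
filemap_copy_from_user_iovec() (the real helper copies from user space; this
sketch uses memcpy and only shows the control flow): once the current segment
is exhausted it steps into the next iovec, and without the
bytes = min(bytes, cur_iov->iov_len - iov_base) clamp that step can land in
memory that fault_in_pages_readable() was never told about.

#include <string.h>
#include <sys/uio.h>

/* Simplified sketch of a segment-walking copy; names are assumptions. */
static void copy_from_segments(char *dst, const struct iovec *iov,
			       size_t base, size_t bytes)
{
	while (bytes) {
		size_t chunk = iov->iov_len - base;

		if (chunk > bytes)
			chunk = bytes;
		memcpy(dst, (char *)iov->iov_base + base, chunk);
		dst += chunk;
		bytes -= chunk;
		base = 0;
		iov++;	/* next segment: prefaulted only if bytes was clamped */
	}
}

With the patch applied, bytes never exceeds the current segment, so a loop
like the one above never advances past the segment that
fault_in_pages_readable() has just brought in.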