author     Nick Piggin <npiggin@suse.de>                          2007-10-16 04:24:59 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:42:54 -0400
commit     08291429cfa6258c4cd95d8833beb40f828b194e (patch)
tree       50a206f0f0e7a5400b44073107ff00517e6f50ac /include/linux
parent     4a9e5ef1f4f15205e477817a5cefc34bd3f65f55 (diff)
mm: fix pagecache write deadlocks
Modify the core write() code so that it won't take a pagefault while holding a lock on the pagecache page. There are a number of different deadlocks possible if we try to do such a thing:

1.  generic_buffered_write
2.    lock_page
3.    prepare_write
4.    unlock_page+vmtruncate
5.    copy_from_user
6.      mmap_sem(r)
7.        handle_mm_fault
8.          lock_page (filemap_nopage)
9.    commit_write
10.   unlock_page

a.  sys_munmap / sys_mlock / others
b.    mmap_sem(w)
c.      make_pages_present
d.        get_user_pages
e.          handle_mm_fault
f.            lock_page (filemap_nopage)

2,8     - recursive deadlock if the page is the same
2,8;2,8 - ABBA deadlock if the pages are different
2,6;b,f - ABBA deadlock if the page is the same

The solution is as follows:

1.  If we find the destination page is uptodate, continue as normal, but use atomic usercopies which do not take pagefaults and do not zero the uncopied tail of the destination. The destination is already uptodate, so we can commit_write the full length even if there was a partial copy: it does not matter that the tail was not modified, because if it is dirtied and written back to disk it will not cause any problems (uptodate *means* that the destination page is as new or newer than the copy on disk).

1a. The above requires that fault_in_pages_readable correctly returns access information, because atomic usercopies cannot distinguish non-present pages in a readable mapping from the lack of a readable mapping.

2.  If we find the destination page is not uptodate, unlock it (this could be made slightly more optimal), then allocate a temporary page to copy the source data into. Relock the destination page and continue with the copy. However, instead of a usercopy (which might take a fault), copy the data from the pinned temporary page via the kernel address space.

(Also, rename maxlen to seglen, because it was confusing.)

This increases the CPU/memory copy cost by almost 50% on the affected workloads. That will be solved by introducing a new set of pagecache write aops in a subsequent patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
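The ordering the changelog describes for the uptodate fast path can be pictured as: prefault the source *before* taking the page lock, then copy with pagefaults disabled while the lock is held. The following is a minimal, illustrative C sketch of that ordering only; it is not the actual mm/filemap.c code, copy_segment_locked() is a hypothetical helper name, and the modern one-argument kmap_atomic() form is used for brevity.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

/* Sketch of the uptodate-page fast path described above (hypothetical helper). */
static ssize_t copy_segment_locked(struct page *page, unsigned long offset,
				   const char __user *buf, unsigned int bytes)
{
	char *kaddr;
	size_t copied;

	/*
	 * Prefault the source before lock_page(): no page lock is held yet,
	 * so a fault that takes mmap_sem and locks a pagecache page cannot
	 * produce the 2,8 or 2,6;b,f deadlocks listed in the changelog.
	 */
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;			/* no readable mapping at all */

	lock_page(page);

	/*
	 * Atomic usercopy under the page lock: it may copy fewer than
	 * 'bytes' bytes, but it never sleeps and never enters the fault path.
	 */
	kaddr = kmap_atomic(page);		/* modern one-argument form */
	pagefault_disable();
	copied = bytes - __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	pagefault_enable();
	kunmap_atomic(kaddr);

	unlock_page(page);
	return copied;	/* short copy: caller faults the source in and retries */
}

The key property is that nothing between lock_page() and unlock_page() can sleep on mmap_sem or recurse into the fault path; a short copy is simply reported back so the caller can fault the source in again and retry without the lock held.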
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/pagemap.h   11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 862fc07dc6c0..8f1e390fd71b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -219,6 +219,9 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
 	int ret;
 
+	if (unlikely(size == 0))
+		return 0;
+
 	/*
 	 * Writing zeroes into userspace here is OK, because we know that if
 	 * the zero gets there, we'll be overwriting it.
@@ -238,19 +241,23 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 	return ret;
 }
 
-static inline void fault_in_pages_readable(const char __user *uaddr, int size)
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 {
 	volatile char c;
 	int ret;
 
+	if (unlikely(size == 0))
+		return 0;
+
 	ret = __get_user(c, uaddr);
 	if (ret == 0) {
 		const char __user *end = uaddr + size - 1;
 
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK))
-			__get_user(c, end);
+			ret = __get_user(c, end);
 	}
+	return ret;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
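With this diff applied, a caller can tell an unreadable source mapping apart from one whose pages merely are not resident yet. A minimal caller-side sketch follows, assuming a hypothetical prefault_segment() helper; the real caller is the generic buffered write path in mm/filemap.c.

#include <linux/errno.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper: prefault one iovec segment before it is copied under
 * the page lock.  With the old void return, an unreadable mapping looked the
 * same as a resident one and the write loop could retry forever; the int
 * return lets the caller fail the write with -EFAULT instead.
 */
static int prefault_segment(const char __user *buf, int seglen)
{
	if (fault_in_pages_readable(buf, seglen))
		return -EFAULT;	/* no readable mapping backing buf */
	return 0;		/* pages touched; a later atomic copy may still be short */
}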