diff options
author | Anton Altaparmakov <aia21@cam.ac.uk> | 2007-10-12 04:37:15 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-12 12:16:30 -0400 |
commit | bfab36e81611e60573b84eb4e4b4c8d8545b2320 (patch) | |
tree | acd151a4c85459dcd2f6575ceb385090ebaaf984 /fs/ntfs/file.c | |
parent | f26e51f67ae6a75ffc57b96cf5fe096f75e778cb (diff) |
NTFS: Fix a mount time deadlock.
Big thanks go to Mathias Kolehmainen for reporting the bug, providing
debug output and testing the patches I sent him to get it working.
The fix was to stop calling ntfs_attr_set() at mount time as that causes
balance_dirty_pages_ratelimited() to be called which on systems with
little memory actually tries to go and balance the dirty pages which tries
to take the s_umount semaphore but because we are still in fill_super()
across which the VFS holds s_umount for writing this results in a
deadlock.
We now do the dirty work by hand by submitting individual buffers. This
has the annoying "feature" that mounting can take a few seconds if the
journal is large as we have to clear it all. One day someone should improve
on this by deferring the journal clearing to a helper kernel thread so it
can be done in the background but I don't have time for this at the moment
and the current solution works fine so I am leaving it like this for now.
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/ntfs/file.c')
-rw-r--r-- | fs/ntfs/file.c | 36 |
1 files changed, 17 insertions, 19 deletions
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index ffcc504a1667..c814204d4ea0 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. | 2 | * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2006 Anton Altaparmakov | 4 | * Copyright (c) 2001-2007 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/swap.h> | 26 | #include <linux/swap.h> |
27 | #include <linux/uio.h> | 27 | #include <linux/uio.h> |
28 | #include <linux/writeback.h> | 28 | #include <linux/writeback.h> |
29 | #include <linux/sched.h> | ||
30 | 29 | ||
31 | #include <asm/page.h> | 30 | #include <asm/page.h> |
32 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -362,7 +361,7 @@ static inline void ntfs_fault_in_pages_readable(const char __user *uaddr, | |||
362 | volatile char c; | 361 | volatile char c; |
363 | 362 | ||
364 | /* Set @end to the first byte outside the last page we care about. */ | 363 | /* Set @end to the first byte outside the last page we care about. */ |
365 | end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes); | 364 | end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes); |
366 | 365 | ||
367 | while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end)) | 366 | while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end)) |
368 | ; | 367 | ; |
@@ -532,7 +531,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, | |||
532 | blocksize_bits = vol->sb->s_blocksize_bits; | 531 | blocksize_bits = vol->sb->s_blocksize_bits; |
533 | u = 0; | 532 | u = 0; |
534 | do { | 533 | do { |
535 | struct page *page = pages[u]; | 534 | page = pages[u]; |
535 | BUG_ON(!page); | ||
536 | /* | 536 | /* |
537 | * create_empty_buffers() will create uptodate/dirty buffers if | 537 | * create_empty_buffers() will create uptodate/dirty buffers if |
538 | * the page is uptodate/dirty. | 538 | * the page is uptodate/dirty. |
@@ -1291,7 +1291,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages, | |||
1291 | size_t bytes) | 1291 | size_t bytes) |
1292 | { | 1292 | { |
1293 | struct page **last_page = pages + nr_pages; | 1293 | struct page **last_page = pages + nr_pages; |
1294 | char *kaddr; | 1294 | char *addr; |
1295 | size_t total = 0; | 1295 | size_t total = 0; |
1296 | unsigned len; | 1296 | unsigned len; |
1297 | int left; | 1297 | int left; |
@@ -1300,13 +1300,13 @@ static inline size_t ntfs_copy_from_user(struct page **pages, | |||
1300 | len = PAGE_CACHE_SIZE - ofs; | 1300 | len = PAGE_CACHE_SIZE - ofs; |
1301 | if (len > bytes) | 1301 | if (len > bytes) |
1302 | len = bytes; | 1302 | len = bytes; |
1303 | kaddr = kmap_atomic(*pages, KM_USER0); | 1303 | addr = kmap_atomic(*pages, KM_USER0); |
1304 | left = __copy_from_user_inatomic(kaddr + ofs, buf, len); | 1304 | left = __copy_from_user_inatomic(addr + ofs, buf, len); |
1305 | kunmap_atomic(kaddr, KM_USER0); | 1305 | kunmap_atomic(addr, KM_USER0); |
1306 | if (unlikely(left)) { | 1306 | if (unlikely(left)) { |
1307 | /* Do it the slow way. */ | 1307 | /* Do it the slow way. */ |
1308 | kaddr = kmap(*pages); | 1308 | addr = kmap(*pages); |
1309 | left = __copy_from_user(kaddr + ofs, buf, len); | 1309 | left = __copy_from_user(addr + ofs, buf, len); |
1310 | kunmap(*pages); | 1310 | kunmap(*pages); |
1311 | if (unlikely(left)) | 1311 | if (unlikely(left)) |
1312 | goto err_out; | 1312 | goto err_out; |
@@ -1408,26 +1408,26 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages, | |||
1408 | size_t *iov_ofs, size_t bytes) | 1408 | size_t *iov_ofs, size_t bytes) |
1409 | { | 1409 | { |
1410 | struct page **last_page = pages + nr_pages; | 1410 | struct page **last_page = pages + nr_pages; |
1411 | char *kaddr; | 1411 | char *addr; |
1412 | size_t copied, len, total = 0; | 1412 | size_t copied, len, total = 0; |
1413 | 1413 | ||
1414 | do { | 1414 | do { |
1415 | len = PAGE_CACHE_SIZE - ofs; | 1415 | len = PAGE_CACHE_SIZE - ofs; |
1416 | if (len > bytes) | 1416 | if (len > bytes) |
1417 | len = bytes; | 1417 | len = bytes; |
1418 | kaddr = kmap_atomic(*pages, KM_USER0); | 1418 | addr = kmap_atomic(*pages, KM_USER0); |
1419 | copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs, | 1419 | copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, |
1420 | *iov, *iov_ofs, len); | 1420 | *iov, *iov_ofs, len); |
1421 | kunmap_atomic(kaddr, KM_USER0); | 1421 | kunmap_atomic(addr, KM_USER0); |
1422 | if (unlikely(copied != len)) { | 1422 | if (unlikely(copied != len)) { |
1423 | /* Do it the slow way. */ | 1423 | /* Do it the slow way. */ |
1424 | kaddr = kmap(*pages); | 1424 | addr = kmap(*pages); |
1425 | copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs, | 1425 | copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, |
1426 | *iov, *iov_ofs, len); | 1426 | *iov, *iov_ofs, len); |
1427 | /* | 1427 | /* |
1428 | * Zero the rest of the target like __copy_from_user(). | 1428 | * Zero the rest of the target like __copy_from_user(). |
1429 | */ | 1429 | */ |
1430 | memset(kaddr + ofs + copied, 0, len - copied); | 1430 | memset(addr + ofs + copied, 0, len - copied); |
1431 | kunmap(*pages); | 1431 | kunmap(*pages); |
1432 | if (unlikely(copied != len)) | 1432 | if (unlikely(copied != len)) |
1433 | goto err_out; | 1433 | goto err_out; |
@@ -1735,8 +1735,6 @@ static int ntfs_commit_pages_after_write(struct page **pages, | |||
1735 | read_unlock_irqrestore(&ni->size_lock, flags); | 1735 | read_unlock_irqrestore(&ni->size_lock, flags); |
1736 | BUG_ON(initialized_size != i_size); | 1736 | BUG_ON(initialized_size != i_size); |
1737 | if (end > initialized_size) { | 1737 | if (end > initialized_size) { |
1738 | unsigned long flags; | ||
1739 | |||
1740 | write_lock_irqsave(&ni->size_lock, flags); | 1738 | write_lock_irqsave(&ni->size_lock, flags); |
1741 | ni->initialized_size = end; | 1739 | ni->initialized_size = end; |
1742 | i_size_write(vi, end); | 1740 | i_size_write(vi, end); |