author     Nick Piggin <npiggin@suse.de>  2007-05-06 17:49:04 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:12:51 -0400
commit     6fe6900e1e5b6fa9e5c59aa5061f244fe3f467e2 (patch)
tree       8bbfe5072279227cc50a941ad4813908082426a1 /fs/ntfs
parent     714b8171af9c930a59a0da8f6fe50518e70ab035 (diff)
mm: make read_cache_page synchronous
Ensure pages are uptodate after returning from read_cache_page, which allows us to cut out most of the filesystem-internal PageUptodate calls.

I didn't have a great look down the call chains, but this appears to fix 7 possible use-before-uptodate bugs in hfs, 2 in hfsplus, 1 in jfs, a few in ecryptfs, 1 in jffs2, and a possible case of cleared data being overwritten with readpage in block2mtd. All depend on whether the filler is async and/or can return with a !uptodate page.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
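Roughly, the caller-side simplification looks like the sketch below. This is an illustrative sketch only: the helper names are hypothetical and not part of the patch, and only page-cache calls that already appear in the hunks below are used.

#include <linux/err.h>
#include <linux/pagemap.h>

/* Hypothetical helper, before the change: read_cache_page() (and the
 * read_mapping_page() wrapper) could return a locked, not-yet-uptodate
 * page, so every caller had to wait and re-check. */
static int read_bitmap_page_before(struct address_space *mapping,
		pgoff_t index, struct page **ret)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);		/* synchronous read error */
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {		/* asynchronous read error */
		page_cache_release(page);
		return -EIO;
	}
	*ret = page;
	return 0;
}

/* Hypothetical helper, after the change: read_cache_page() only returns
 * once the read has completed, so one error check on the page suffices. */
static int read_bitmap_page_after(struct address_space *mapping,
		pgoff_t index, struct page **ret)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);
	if (PageError(page)) {
		page_cache_release(page);
		return -EIO;
	}
	*ret = page;
	return 0;
}
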
Diffstat (limited to 'fs/ntfs')
-rw-r--r--  fs/ntfs/aops.h    3
-rw-r--r--  fs/ntfs/attrib.c  18
-rw-r--r--  fs/ntfs/file.c    3
-rw-r--r--  fs/ntfs/super.c   30
4 files changed, 8 insertions, 46 deletions
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 9393f4b1e298..caecc58f529c 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -89,9 +89,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
 	struct page *page = read_mapping_page(mapping, index, NULL);
 
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (PageUptodate(page) && !PageError(page))
+		if (!PageError(page))
 			return page;
 		ntfs_unmap_page(page);
 		return ERR_PTR(-EIO);
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 7659cc192995..1c08fefe487a 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -2532,14 +2532,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read first partial "
-					"page (sync error, index 0x%lx).", idx);
-			return PTR_ERR(page);
-		}
-		wait_on_page_locked(page);
-		if (unlikely(!PageUptodate(page))) {
-			ntfs_error(vol->sb, "Failed to read first partial page "
-					"(async error, index 0x%lx).", idx);
-			page_cache_release(page);
+					"page (error, index 0x%lx).", idx);
 			return PTR_ERR(page);
 		}
 		/*
@@ -2602,14 +2595,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read last partial page "
-					"(sync error, index 0x%lx).", idx);
-			return PTR_ERR(page);
-		}
-		wait_on_page_locked(page);
-		if (unlikely(!PageUptodate(page))) {
-			ntfs_error(vol->sb, "Failed to read last partial page "
-					"(async error, index 0x%lx).", idx);
-			page_cache_release(page);
+					"(error, index 0x%lx).", idx);
 			return PTR_ERR(page);
 		}
 		kaddr = kmap_atomic(page, KM_USER0);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index d69c4595ccd0..dbbac5593106 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -236,8 +236,7 @@ do_non_resident_extend:
 			err = PTR_ERR(page);
 			goto init_err_out;
 		}
-		wait_on_page_locked(page);
-		if (unlikely(!PageUptodate(page) || PageError(page))) {
+		if (unlikely(PageError(page))) {
 			page_cache_release(page);
 			err = -EIO;
 			goto init_err_out;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1594c90b7164..2ddde534db0a 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2471,7 +2471,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	s64 nr_free = vol->nr_clusters;
 	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
-	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
 	pgoff_t index, max_index;
 
@@ -2494,24 +2493,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
 		 */
-		page = read_cache_page(mapping, index, (filler_t*)readpage,
-				NULL);
+		page = read_mapping_page(mapping, index, NULL);
 		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
-			ntfs_debug("Sync read_cache_page() error. Skipping "
+			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		wait_on_page_locked(page);
-		/* Ignore pages which errored asynchronously. */
-		if (!PageUptodate(page)) {
-			ntfs_debug("Async read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index);
-			page_cache_release(page);
-			nr_free -= PAGE_CACHE_SIZE * 8;
-			continue;
-		}
 		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
 		 * For each 4 bytes, subtract the number of set bits. If this
@@ -2562,7 +2551,6 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 {
 	u32 *kaddr;
 	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
-	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
 	pgoff_t index;
 
@@ -2576,21 +2564,11 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
 		 */
-		page = read_cache_page(mapping, index, (filler_t*)readpage,
-				NULL);
+		page = read_mapping_page(mapping, index, NULL);
 		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
-			ntfs_debug("Sync read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index);
-			nr_free -= PAGE_CACHE_SIZE * 8;
-			continue;
-		}
-		wait_on_page_locked(page);
-		/* Ignore pages which errored asynchronously. */
-		if (!PageUptodate(page)) {
-			ntfs_debug("Async read_cache_page() error. Skipping "
+			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
-			page_cache_release(page);
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}