Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c | 149
1 file changed, 107 insertions, 42 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index c9c266db0624..d54812b198e9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -275,6 +275,7 @@ void invalidate_bdev(struct block_device *bdev)
 		return;
 
 	invalidate_bh_lrus();
+	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
 }
 EXPORT_SYMBOL(invalidate_bdev);
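
invalidate_mapping_pages() will not drop a page that still holds extra references, and pages sitting in a CPU's lru_add pagevec hold exactly such a reference, so without the drain they can survive the invalidation. For context, the function as it reads after this hunk; the opening lines (the mapping local and the nrpages early-return) are not shown in the hunk and are assumed:

void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)	/* assumed early-return test */
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
}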
@@ -560,26 +561,17 @@ repeat:
 	return err;
 }
 
-static void do_thaw_all(struct work_struct *work)
+static void do_thaw_one(struct super_block *sb, void *unused)
 {
-	struct super_block *sb;
 	char b[BDEVNAME_SIZE];
+	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+		printk(KERN_WARNING "Emergency Thaw on %s\n",
+			bdevname(sb->s_bdev, b));
+}
 
-	spin_lock(&sb_lock);
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-			printk(KERN_WARNING "Emergency Thaw on %s\n",
-				bdevname(sb->s_bdev, b));
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
-	}
-	spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+	iterate_supers(do_thaw_one, NULL);
 	kfree(work);
 	printk(KERN_WARNING "Emergency Thaw complete\n");
 }
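
iterate_supers() factors out the superblock walk that do_thaw_all() used to open-code: take a passive s_count reference under sb_lock, drop the lock, hold s_umount across the callback, then re-take sb_lock and put the reference. A rough sketch of that helper, reconstructed from the loop deleted above (not verbatim fs/super.c; the s_root check and the exact reference-dropping are assumptions):

void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;			/* passive reference */
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)			/* skip supers being torn down */
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		__put_super(sb);		/* drop the passive reference */
	}
	spin_unlock(&sb_lock);
}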
@@ -1957,14 +1949,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 }
 
 /*
- * block_write_begin takes care of the basic task of block allocation and
- * bringing partial write blocks uptodate first.
- *
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
+ * Filesystems implementing the new truncate sequence should use the
+ * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * The filesystem needs to handle block truncation upon failure.
  */
-int block_write_begin(struct file *file, struct address_space *mapping,
+int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata,
 			get_block_t *get_block)
@@ -2000,20 +1989,50 @@ int block_write_begin(struct file *file, struct address_space *mapping,
 			unlock_page(page);
 			page_cache_release(page);
 			*pagep = NULL;
-
-			/*
-			 * prepare_write() may have instantiated a few blocks
-			 * outside i_size. Trim these off again. Don't need
-			 * i_size_read because we hold i_mutex.
-			 */
-			if (pos + len > inode->i_size)
-				vmtruncate(inode, inode->i_size);
 		}
 	}
 
 out:
 	return status;
 }
+EXPORT_SYMBOL(block_write_begin_newtrunc);
+
+/*
+ * block_write_begin takes care of the basic task of block allocation and
+ * bringing partial write blocks uptodate first.
+ *
+ * If *pagep is not NULL, then block_write_begin uses the locked page
+ * at *pagep rather than allocating its own. In this case, the page will
+ * not be unlocked or deallocated on failure.
+ */
+int block_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata,
+			get_block_t *get_block)
+{
+	int ret;
+
+	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
+					pagep, fsdata, get_block);
+
+	/*
+	 * prepare_write() may have instantiated a few blocks
+	 * outside i_size. Trim these off again. Don't need
+	 * i_size_read because we hold i_mutex.
+	 *
+	 * Filesystems which pass down their own page also cannot
+	 * call into vmtruncate here because it would lead to lock
+	 * inversion problems (*pagep is locked). This is a further
+	 * example of where the old truncate sequence is inadequate.
+	 */
+	if (unlikely(ret) && *pagep == NULL) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
+
+	return ret;
+}
 EXPORT_SYMBOL(block_write_begin);
 
 int block_write_end(struct file *file, struct address_space *mapping,
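
The compat wrapper keeps the old behaviour for unconverted callers: on failure it trims blocks instantiated beyond i_size via vmtruncate(), but only when block_write_begin_newtrunc() allocated the page itself (*pagep == NULL), because truncating while holding a caller-supplied locked page would invert the lock order. A filesystem converted to the new truncate sequence calls the _newtrunc variant directly and trims its own blocks on failure; a hedged sketch, where myfs_get_block() and myfs_truncate_blocks() are hypothetical names:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
					pagep, fsdata, myfs_get_block);
	if (unlikely(ret)) {
		/* new truncate sequence: the fs trims its own blocks */
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			myfs_truncate_blocks(mapping->host, isize);
	}
	return ret;
}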
@@ -2332,7 +2351,7 @@ out:
  * For moronic filesystems that do not allow holes in file.
  * We may have to extend the file.
  */
-int cont_write_begin(struct file *file, struct address_space *mapping,
+int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata,
 			get_block_t *get_block, loff_t *bytes)
@@ -2353,11 +2372,30 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 	}
 
 	*pagep = NULL;
-	err = block_write_begin(file, mapping, pos, len,
+	err = block_write_begin_newtrunc(file, mapping, pos, len,
 				flags, pagep, fsdata, get_block);
 out:
 	return err;
 }
+EXPORT_SYMBOL(cont_write_begin_newtrunc);
+
+int cont_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata,
+			get_block_t *get_block, loff_t *bytes)
+{
+	int ret;
+
+	ret = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
+					pagep, fsdata, get_block, bytes);
+	if (unlikely(ret)) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
+
+	return ret;
+}
 EXPORT_SYMBOL(cont_write_begin);
 
 int block_prepare_write(struct page *page, unsigned from, unsigned to,
@@ -2389,7 +2427,7 @@ EXPORT_SYMBOL(block_commit_write);
  *
  * We are not allowed to take the i_mutex here so we have to play games to
  * protect against truncate races as the page could now be beyond EOF. Because
- * vmtruncate() writes the inode size before removing pages, once we have the
+ * truncate writes the inode size before removing pages, once we have the
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
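
This hunk only rewords the comment: the guarantee no longer depends on vmtruncate() specifically, just on truncation updating i_size before removing pages. That ordering is what lets page_mkwrite validate a page under its page lock. A minimal sketch of the check the comment describes (simplified shape, not verbatim kernel code):

static int page_safe_against_truncate(struct inode *inode, struct page *page)
{
	loff_t size;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) || (page_offset(page) > size)) {
		unlock_page(page);
		return 0;	/* page was truncated out from under us */
	}
	/* safe against truncation until unlock_page() */
	return 1;
}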
@@ -2472,10 +2510,11 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
 }
 
 /*
- * On entry, the page is fully not uptodate.
- * On exit the page is fully uptodate in the areas outside (from,to)
+ * Filesystems implementing the new truncate sequence should use the
+ * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * The filesystem needs to handle block truncation upon failure.
  */
-int nobh_write_begin(struct file *file, struct address_space *mapping,
+int nobh_write_begin_newtrunc(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata,
 			get_block_t *get_block)
@@ -2508,8 +2547,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
 		unlock_page(page);
 		page_cache_release(page);
 		*pagep = NULL;
-		return block_write_begin(file, mapping, pos, len, flags, pagep,
-					fsdata, get_block);
+		return block_write_begin_newtrunc(file, mapping, pos, len,
+						flags, pagep, fsdata, get_block);
 	}
 
 	if (PageMappedToDisk(page))
@@ -2613,8 +2652,34 @@ out_release:
 	page_cache_release(page);
 	*pagep = NULL;
 
-	if (pos + len > inode->i_size)
-		vmtruncate(inode, inode->i_size);
+	return ret;
+}
+EXPORT_SYMBOL(nobh_write_begin_newtrunc);
+
+/*
+ * On entry, the page is fully not uptodate.
+ * On exit the page is fully uptodate in the areas outside (from,to)
+ */
+int nobh_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata,
+			get_block_t *get_block)
+{
+	int ret;
+
+	ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags,
+					pagep, fsdata, get_block);
+
+	/*
+	 * prepare_write() may have instantiated a few blocks
+	 * outside i_size. Trim these off again. Don't need
+	 * i_size_read because we hold i_mutex.
+	 */
+	if (unlikely(ret)) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
 
 	return ret;
 }
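
The same trim-on-failure idiom now appears in all three legacy wrappers (block_write_begin, cont_write_begin, nobh_write_begin). This patch leaves it duplicated, but it could be factored into a helper built only from logic visible above; a hedged sketch:

static void trim_blocks_on_failed_write(struct address_space *mapping,
					loff_t pos, unsigned len)
{
	struct inode *inode = mapping->host;
	loff_t isize = inode->i_size;	/* i_mutex held, no i_size_read needed */

	if (pos + len > isize)
		vmtruncate(inode, isize);
}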