Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	148
1 file changed, 106 insertions, 42 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 08e422d56996..d54812b198e9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -561,26 +561,17 @@ repeat:
 	return err;
 }
 
-static void do_thaw_all(struct work_struct *work)
+static void do_thaw_one(struct super_block *sb, void *unused)
 {
-	struct super_block *sb;
 	char b[BDEVNAME_SIZE];
+	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+		printk(KERN_WARNING "Emergency Thaw on %s\n",
+		       bdevname(sb->s_bdev, b));
+}
 
-	spin_lock(&sb_lock);
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-			printk(KERN_WARNING "Emergency Thaw on %s\n",
-			       bdevname(sb->s_bdev, b));
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
-	}
-	spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+	iterate_supers(do_thaw_one, NULL);
 	kfree(work);
 	printk(KERN_WARNING "Emergency Thaw complete\n");
 }
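A quick sketch of the iterate_supers() callback pattern the hunk above converts to: the helper walks the super_blocks list itself, taking a reference and sb->s_umount for reading around each call, so the open-coded s_count and sb_lock juggling disappears from callers. The names count_one and count_bdev_supers below are illustrative only, not part of this patch.

static void count_one(struct super_block *sb, void *arg)
{
	int *count = arg;

	/* Runs with a reference held and sb->s_umount held for reading. */
	if (sb->s_bdev)
		(*count)++;
}

static void count_bdev_supers(void)
{
	int count = 0;

	iterate_supers(count_one, &count);
	printk(KERN_INFO "%d superblocks backed by block devices\n", count);
}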
@@ -1958,14 +1949,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 }
 
 /*
- * block_write_begin takes care of the basic task of block allocation and
- * bringing partial write blocks uptodate first.
- *
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
+ * Filesystems implementing the new truncate sequence should use the
+ * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * The filesystem needs to handle block truncation upon failure.
  */
-int block_write_begin(struct file *file, struct address_space *mapping,
+int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
 		loff_t pos, unsigned len, unsigned flags,
 		struct page **pagep, void **fsdata,
 		get_block_t *get_block)
@@ -2001,20 +1989,50 @@ int block_write_begin(struct file *file, struct address_space *mapping,
 			unlock_page(page);
 			page_cache_release(page);
 			*pagep = NULL;
-
-			/*
-			 * prepare_write() may have instantiated a few blocks
-			 * outside i_size. Trim these off again. Don't need
-			 * i_size_read because we hold i_mutex.
-			 */
-			if (pos + len > inode->i_size)
-				vmtruncate(inode, inode->i_size);
 		}
 	}
 
 out:
 	return status;
 }
+EXPORT_SYMBOL(block_write_begin_newtrunc);
+
+/*
+ * block_write_begin takes care of the basic task of block allocation and
+ * bringing partial write blocks uptodate first.
+ *
+ * If *pagep is not NULL, then block_write_begin uses the locked page
+ * at *pagep rather than allocating its own. In this case, the page will
+ * not be unlocked or deallocated on failure.
+ */
+int block_write_begin(struct file *file, struct address_space *mapping,
+		loff_t pos, unsigned len, unsigned flags,
+		struct page **pagep, void **fsdata,
+		get_block_t *get_block)
+{
+	int ret;
+
+	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
+					 pagep, fsdata, get_block);
+
+	/*
+	 * prepare_write() may have instantiated a few blocks
+	 * outside i_size. Trim these off again. Don't need
+	 * i_size_read because we hold i_mutex.
+	 *
+	 * Filesystems which pass down their own page also cannot
+	 * call into vmtruncate here because it would lead to lock
+	 * inversion problems (*pagep is locked). This is a further
+	 * example of where the old truncate sequence is inadequate.
+	 */
+	if (unlikely(ret) && *pagep == NULL) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
+
+	return ret;
+}
 EXPORT_SYMBOL(block_write_begin);
 
 int block_write_end(struct file *file, struct address_space *mapping,
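For a filesystem converted to the new truncate sequence, the intended calling pattern is to use the _newtrunc variant directly and handle block truncation on failure itself, instead of relying on the vmtruncate() call in the legacy wrapper above. A minimal sketch, assuming hypothetical myfs_get_block and myfs_truncate_blocks helpers (none of this code is in the patch):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
					 pagep, fsdata, myfs_get_block);
	if (unlikely(ret)) {
		/* The filesystem, not the helper, trims blocks past i_size. */
		if (pos + len > mapping->host->i_size)
			myfs_truncate_blocks(mapping->host,
					     mapping->host->i_size);
	}

	return ret;
}

The same failure contract applies to the cont_write_begin_newtrunc and nobh_write_begin_newtrunc variants introduced below.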
@@ -2333,7 +2351,7 @@ out:
  * For moronic filesystems that do not allow holes in file.
  * We may have to extend the file.
  */
-int cont_write_begin(struct file *file, struct address_space *mapping,
+int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata,
 			get_block_t *get_block, loff_t *bytes)
@@ -2354,11 +2372,30 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 	}
 
 	*pagep = NULL;
-	err = block_write_begin(file, mapping, pos, len,
+	err = block_write_begin_newtrunc(file, mapping, pos, len,
 				flags, pagep, fsdata, get_block);
 out:
 	return err;
 }
+EXPORT_SYMBOL(cont_write_begin_newtrunc);
+
+int cont_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata,
+			get_block_t *get_block, loff_t *bytes)
+{
+	int ret;
+
+	ret = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
+					pagep, fsdata, get_block, bytes);
+	if (unlikely(ret)) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
+
+	return ret;
+}
 EXPORT_SYMBOL(cont_write_begin);
 
 int block_prepare_write(struct page *page, unsigned from, unsigned to,
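cont_write_begin_newtrunc() serves filesystems that cannot represent holes: *bytes points at the filesystem's private record of the zero-filled size, and the helper zero-fills pages between that mark and the write position before doing the usual write_begin work. A hedged sketch of a caller, with noholefs_* and the alloc_size field as hypothetical stand-ins (for comparison, the real fat driver passes &MSDOS_I(mapping->host)->mmu_private here):

static int noholefs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	/*
	 * alloc_size is a hypothetical per-inode field tracking how far
	 * the file has been zero-filled; the helper advances it as it
	 * extends the file.
	 */
	*pagep = NULL;
	return cont_write_begin_newtrunc(file, mapping, pos, len, flags,
					 pagep, fsdata, noholefs_get_block,
					 &NOHOLEFS_I(mapping->host)->alloc_size);
}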
@@ -2390,7 +2427,7 @@ EXPORT_SYMBOL(block_commit_write);
  *
  * We are not allowed to take the i_mutex here so we have to play games to
  * protect against truncate races as the page could now be beyond EOF. Because
- * vmtruncate() writes the inode size before removing pages, once we have the
+ * truncate writes the inode size before removing pages, once we have the
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
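The ordering described above (truncate updates i_size before it removes pages) is what makes the lock-then-check idiom in block_page_mkwrite() safe. A minimal sketch of that check, with lock_page_against_truncate() as a hypothetical helper name:

static int lock_page_against_truncate(struct inode *inode, struct page *page)
{
	loff_t size;

	lock_page(page);
	size = i_size_read(inode);
	if (page->mapping != inode->i_mapping || page_offset(page) > size) {
		/* Truncate got there first; the page is beyond EOF. */
		unlock_page(page);
		return -EFAULT;
	}
	/* Until unlock_page(), truncate cannot remove this page. */
	return 0;
}

If the check passes, truncate must first shrink i_size and then block on the page lock before it can remove the page, so the page stays inside EOF for as long as it is held locked.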
@@ -2473,10 +2510,11 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
 }
 
 /*
- * On entry, the page is fully not uptodate.
- * On exit the page is fully uptodate in the areas outside (from,to)
+ * Filesystems implementing the new truncate sequence should use the
+ * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * The filesystem needs to handle block truncation upon failure.
  */
-int nobh_write_begin(struct file *file, struct address_space *mapping,
+int nobh_write_begin_newtrunc(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata,
 			get_block_t *get_block)
@@ -2509,8 +2547,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
 		unlock_page(page);
 		page_cache_release(page);
 		*pagep = NULL;
-		return block_write_begin(file, mapping, pos, len, flags, pagep,
-					fsdata, get_block);
+		return block_write_begin_newtrunc(file, mapping, pos, len,
+					flags, pagep, fsdata, get_block);
 	}
 
 	if (PageMappedToDisk(page))
@@ -2614,8 +2652,34 @@ out_release:
 	page_cache_release(page);
 	*pagep = NULL;
 
-	if (pos + len > inode->i_size)
-		vmtruncate(inode, inode->i_size);
+	return ret;
+}
+EXPORT_SYMBOL(nobh_write_begin_newtrunc);
+
+/*
+ * On entry, the page is fully not uptodate.
+ * On exit the page is fully uptodate in the areas outside (from,to)
+ */
+int nobh_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata,
+			get_block_t *get_block)
+{
+	int ret;
+
+	ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags,
+					pagep, fsdata, get_block);
+
+	/*
+	 * prepare_write() may have instantiated a few blocks
+	 * outside i_size. Trim these off again. Don't need
+	 * i_size_read because we hold i_mutex.
+	 */
+	if (unlikely(ret)) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
 
 	return ret;
 }