about summary refs log tree commit diff stats
path: root/fs/buffer.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2010-06-04 05:29:58 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2010-08-09 16:47:33 -0400
commit155130a4f7848b1aac439cab6bda1a175507c71c (patch)
tree5019b14b0a9221b08959196e01e3f9326957d678 /fs/buffer.c
parent6e1db88d536adcbbfe562b2d4b7d6425784fff12 (diff)
get rid of block_write_begin_newtrunc
Move the call to vmtruncate to get rid of excessive blocks to the callers in preparation of the new truncate sequence and rename the non-truncating version to block_write_begin. While we're at it also remove several unused arguments to block_write_begin. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--fs/buffer.c61
1 file changed, 9 insertions, 52 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index c319c49da511..50efa339e051 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1962,14 +1962,13 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1962EXPORT_SYMBOL(__block_write_begin); 1962EXPORT_SYMBOL(__block_write_begin);
1963 1963
1964/* 1964/*
1965 * Filesystems implementing the new truncate sequence should use the 1965 * block_write_begin takes care of the basic task of block allocation and
1966 * _newtrunc postfix variant which won't incorrectly call vmtruncate. 1966 * bringing partial write blocks uptodate first.
1967 *
1967 * The filesystem needs to handle block truncation upon failure. 1968 * The filesystem needs to handle block truncation upon failure.
1968 */ 1969 */
1969int block_write_begin_newtrunc(struct file *file, struct address_space *mapping, 1970int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1970 loff_t pos, unsigned len, unsigned flags, 1971 unsigned flags, struct page **pagep, get_block_t *get_block)
1971 struct page **pagep, void **fsdata,
1972 get_block_t *get_block)
1973{ 1972{
1974 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1973 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1975 struct page *page; 1974 struct page *page;
@@ -1989,44 +1988,6 @@ int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
1989 *pagep = page; 1988 *pagep = page;
1990 return status; 1989 return status;
1991} 1990}
1992EXPORT_SYMBOL(block_write_begin_newtrunc);
1993
1994/*
1995 * block_write_begin takes care of the basic task of block allocation and
1996 * bringing partial write blocks uptodate first.
1997 *
1998 * If *pagep is not NULL, then block_write_begin uses the locked page
1999 * at *pagep rather than allocating its own. In this case, the page will
2000 * not be unlocked or deallocated on failure.
2001 */
2002int block_write_begin(struct file *file, struct address_space *mapping,
2003 loff_t pos, unsigned len, unsigned flags,
2004 struct page **pagep, void **fsdata,
2005 get_block_t *get_block)
2006{
2007 int ret;
2008
2009 ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
2010 pagep, fsdata, get_block);
2011
2012 /*
2013 * prepare_write() may have instantiated a few blocks
2014 * outside i_size. Trim these off again. Don't need
2015 * i_size_read because we hold i_mutex.
2016 *
2017 * Filesystems which pass down their own page also cannot
2018 * call into vmtruncate here because it would lead to lock
2019 * inversion problems (*pagep is locked). This is a further
2020 * example of where the old truncate sequence is inadequate.
2021 */
2022 if (unlikely(ret) && *pagep == NULL) {
2023 loff_t isize = mapping->host->i_size;
2024 if (pos + len > isize)
2025 vmtruncate(mapping->host, isize);
2026 }
2027
2028 return ret;
2029}
2030EXPORT_SYMBOL(block_write_begin); 1991EXPORT_SYMBOL(block_write_begin);
2031 1992
2032int block_write_end(struct file *file, struct address_space *mapping, 1993int block_write_end(struct file *file, struct address_space *mapping,
@@ -2357,7 +2318,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
2357 2318
2358 err = cont_expand_zero(file, mapping, pos, bytes); 2319 err = cont_expand_zero(file, mapping, pos, bytes);
2359 if (err) 2320 if (err)
2360 goto out; 2321 return err;
2361 2322
2362 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2323 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2363 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2324 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
@@ -2365,11 +2326,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
2365 (*bytes)++; 2326 (*bytes)++;
2366 } 2327 }
2367 2328
2368 *pagep = NULL; 2329 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2369 err = block_write_begin_newtrunc(file, mapping, pos, len,
2370 flags, pagep, fsdata, get_block);
2371out:
2372 return err;
2373} 2330}
2374EXPORT_SYMBOL(cont_write_begin); 2331EXPORT_SYMBOL(cont_write_begin);
2375 2332
@@ -2511,8 +2468,8 @@ int nobh_write_begin(struct address_space *mapping,
2511 unlock_page(page); 2468 unlock_page(page);
2512 page_cache_release(page); 2469 page_cache_release(page);
2513 *pagep = NULL; 2470 *pagep = NULL;
2514 return block_write_begin_newtrunc(NULL, mapping, pos, len, 2471 return block_write_begin(mapping, pos, len, flags, pagep,
2515 flags, pagep, fsdata, get_block); 2472 get_block);
2516 } 2473 }
2517 2474
2518 if (PageMappedToDisk(page)) 2475 if (PageMappedToDisk(page))