path: root/fs/buffer.c
author    Jiri Kosina <jkosina@suse.cz>    2010-12-10 09:19:18 -0500
committer Jiri Kosina <jkosina@suse.cz>    2010-12-10 09:19:18 -0500
commit    2ade0c1d9d93b7642212657ef76f4a1e30233711 (patch)
tree      63bc720c0ffe5f4760cac4ed617b9870b050175e /fs/buffer.c
parent    504499f22c08a03e2e19dc88d31aa0ecd2ac815e (diff)
parent    6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  29
1 file changed, 10 insertions, 19 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 7f0b9b083f77..5930e382959b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -905,7 +905,6 @@ try_again:
 
 		bh->b_state = 0;
 		atomic_set(&bh->b_count, 0);
-		bh->b_private = NULL;
 		bh->b_size = size;
 
 		/* Link the buffer to its page */
@@ -1706,7 +1705,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		 * and kswapd activity, but those code paths have their own
 		 * higher-level throttling.
 		 */
-		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+		if (wbc->sync_mode != WB_SYNC_NONE) {
 			lock_buffer(bh);
 		} else if (!trylock_buffer(bh)) {
 			redirty_page_for_writepage(wbc, page);
@@ -1834,9 +1833,11 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 }
 EXPORT_SYMBOL(page_zero_new_buffers);
 
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
-		get_block_t *get_block)
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+		get_block_t *get_block)
 {
+	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned to = from + len;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end;
 	sector_t block;
@@ -1916,7 +1917,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 	}
 	return err;
 }
-EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(__block_write_begin);
 
 static int __block_commit_write(struct inode *inode, struct page *page,
 		unsigned from, unsigned to)
@@ -1953,15 +1954,6 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 	return 0;
 }
 
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
-		get_block_t *get_block)
-{
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
-
-	return block_prepare_write(page, start, start + len, get_block);
-}
-EXPORT_SYMBOL(__block_write_begin);
-
 /*
  * block_write_begin takes care of the basic task of block allocation and
  * bringing partial write blocks uptodate first.
@@ -2379,7 +2371,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	else
 		end = PAGE_CACHE_SIZE;
 
-	ret = block_prepare_write(page, 0, end, get_block);
+	ret = __block_write_begin(page, 0, end, get_block);
 	if (!ret)
 		ret = block_commit_write(page, 0, end);
 
@@ -2466,11 +2458,10 @@ int nobh_write_begin(struct address_space *mapping,
 	*fsdata = NULL;
 
 	if (page_has_buffers(page)) {
-		unlock_page(page);
-		page_cache_release(page);
-		*pagep = NULL;
-		return block_write_begin(mapping, pos, len, flags, pagep,
-					 get_block);
+		ret = __block_write_begin(page, pos, len, get_block);
+		if (unlikely(ret))
+			goto out_release;
+		return ret;
 	}
 
 	if (PageMappedToDisk(page))
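
The substantive change merged here replaces block_prepare_write(), which took page-relative byte offsets (from/to), with __block_write_begin(), which takes the file position and length and derives the offsets itself. As a rough illustration only, not part of this commit, the sketch below shows how a caller that used to compute from/to by hand might call the new helper; the function name example_write_begin and its get_block parameter are invented for the example, while grab_cache_page_write_begin(), page_cache_release() and PAGE_CACHE_SHIFT are the page-cache API of this kernel generation.

/* Hypothetical caller sketch -- not part of the diff above. */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

static int example_write_begin(struct address_space *mapping, loff_t pos,
			       unsigned len, unsigned flags,
			       struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int ret;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/*
	 * Old API: block_prepare_write(page, from, from + len, get_block),
	 * with from = pos & (PAGE_CACHE_SIZE - 1) computed by the caller.
	 * New API: pass pos/len directly; the helper derives the offsets.
	 */
	ret = __block_write_begin(page, pos, len, get_block);
	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return ret;
}

This mirrors what the in-tree block_write_begin() helper does after the change, so filesystems that already go through block_write_begin() need no edits; only direct callers such as block_page_mkwrite() and nobh_write_begin(), converted in the hunks above, had to switch to __block_write_begin().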