Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  28  +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 5f525b3c6d9f..7e9e409feaa7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1210,7 +1210,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 	return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
 	/* Size must be multiple of hard sectorsize */
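
This hunk only changes linkage: __getblk_slow() becomes static, private to
fs/buffer.c. For context, a minimal sketch of the fast-path/slow-path split
this relies on; __getblk() and __find_get_block() are taken from the
surrounding kernel source of that era, not from this diff, so treat the body
as an approximation:

	/* Public fast path: try the buffer cache lookup first and only
	 * fall back to the now-static slow path on a miss. */
	struct buffer_head *
	__getblk(struct block_device *bdev, sector_t block, int size)
	{
		struct buffer_head *bh = __find_get_block(bdev, block, size);

		if (bh == NULL)
			bh = __getblk_slow(bdev, block, size);
		return bh;
	}
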
@@ -1809,7 +1809,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	} while (bh != head);
 
 	do {
-		get_bh(bh);
 		if (!buffer_mapped(bh))
 			continue;
 		/*
@@ -1838,7 +1837,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	 */
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
-	unlock_page(page);
 
 	do {
 		struct buffer_head *next = bh->b_this_page;
@@ -1846,9 +1844,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
+	unlock_page(page);
 
 	err = 0;
 done:
@@ -1887,7 +1885,6 @@ recover:
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		get_bh(bh);
 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
@@ -1910,7 +1907,6 @@ recover:
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
 	goto done;
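
Taken together, the hunks above rework buffer pinning in
__block_write_full_page(): unlock_page() moves from before the submission
loop to after it, so the locked page keeps its buffer ring stable for the
whole walk, and the per-buffer get_bh()/put_bh() pairs (in both the normal
and the recovery path) are dropped as redundant. A sketch of the resulting
submission loop, reconstructed from the hunks; the buffer_async_write() test
is not visible in this diff and is an assumption from the surrounding code:

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {	/* assumed guard */
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);	/* now only after all buffers are in flight */
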
@@ -1953,7 +1949,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (!buffer_mapped(bh)) {
 			err = get_block(inode, block, bh, 1);
 			if (err)
-				goto out;
+				break;
 			if (buffer_new(bh)) {
 				clear_buffer_new(bh);
 				unmap_underlying_metadata(bh->b_bdev,
@@ -1995,10 +1991,12 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 	while(wait_bh > wait) {
 		wait_on_buffer(*--wait_bh);
 		if (!buffer_uptodate(*wait_bh))
-			return -EIO;
+			err = -EIO;
 	}
-	return 0;
-out:
+	if (!err)
+		return err;
+
+	/* Error case: */
 	/*
 	 * Zero out any newly allocated blocks to avoid exposing stale
 	 * data. If BH_New is set, we know that the block was newly
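
The two hunks above restructure error handling in __block_prepare_write():
a get_block() failure now breaks out of the mapping loop rather than jumping
to a label, and an I/O error in the wait loop is latched in err instead of
being returned immediately, so the function finishes waiting on every buffer
it submitted and then falls through to the zero-out code. The upshot for
callers is that on failure no read is left in flight against the page. A
hypothetical caller, with all myfs_* names illustrative only:

	static int myfs_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
	{
		/* On error, buffer I/O has completed and newly allocated
		 * blocks have already been zeroed by fs/buffer.c. */
		return block_prepare_write(page, from, to, myfs_get_block);
	}
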
@@ -2096,9 +2094,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 			continue;
 
 		if (!buffer_mapped(bh)) {
+			int err = 0;
+
 			fully_mapped = 0;
 			if (iblock < lblock) {
-				if (get_block(inode, iblock, bh, 0))
+				err = get_block(inode, iblock, bh, 0);
+				if (err)
 					SetPageError(page);
 			}
 			if (!buffer_mapped(bh)) {
@@ -2106,7 +2107,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 				memset(kaddr + i * blocksize, 0, blocksize);
 				flush_dcache_page(page);
 				kunmap_atomic(kaddr, KM_USER0);
-				set_buffer_uptodate(bh);
+				if (!err)
+					set_buffer_uptodate(bh);
 				continue;
 			}
 			/*
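
These two hunks close an error-reporting gap in block_read_full_page():
previously a failing get_block() set PageError() but the zero-filled buffer
was still marked uptodate, so later reads could mistake the zeroes for valid
data. A merged view of the resulting block; the kmap_atomic() line comes
from unshown context and is an assumption:

	if (!buffer_mapped(bh)) {
		int err = 0;

		fully_mapped = 0;
		if (iblock < lblock) {
			err = get_block(inode, iblock, bh, 0);
			if (err)
				SetPageError(page);
		}
		if (!buffer_mapped(bh)) {
			void *kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + i * blocksize, 0, blocksize);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			/* don't claim validity after a failed get_block() */
			if (!err)
				set_buffer_uptodate(bh);
			continue;
		}
		/* ...mapped-buffer handling continues unchanged */
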
@@ -3115,7 +3117,7 @@ void __init buffer_init(void)
 
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
-			SLAB_PANIC, init_buffer_head, NULL);
+			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
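
Finally, buffer_init() now creates the buffer_head cache with
SLAB_RECLAIM_ACCOUNT, so the pages backing this slab are accounted as
reclaimable by the VM, which matches the occupancy limit described in the
comment above: buffer_heads can be freed under memory pressure. The
resulting call, using the 2.6-era six-argument kmem_cache_create() with
constructor and destructor parameters:

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			/* RECLAIM_ACCOUNT: count slab pages as reclaimable;
			 * PANIC: failure during early boot is fatal anyway */
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
			init_buffer_head, NULL);
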