path: root/fs/buffer.c
Diffstat (limited to 'fs/buffer.c')
 -rw-r--r--  fs/buffer.c | 42
 1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b12cf947aba..6f88dcc6d002 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -218,7 +218,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 	sb = get_super(bdev);
 	if (sb && !(sb->s_flags & MS_RDONLY)) {
 		sb->s_frozen = SB_FREEZE_WRITE;
-		wmb();
+		smp_wmb();
 
 		sync_inodes_sb(sb, 0);
 		DQUOT_SYNC(sb);
@@ -235,7 +235,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 		sync_inodes_sb(sb, 1);
 
 		sb->s_frozen = SB_FREEZE_TRANS;
-		wmb();
+		smp_wmb();
 
 		sync_blockdev(sb->s_bdev);
 
@@ -263,7 +263,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 		if (sb->s_op->unlockfs)
 			sb->s_op->unlockfs(sb);
 		sb->s_frozen = SB_UNFROZEN;
-		wmb();
+		smp_wmb();
 		wake_up(&sb->s_wait_unfrozen);
 		drop_super(sb);
 	}
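
Note on the wmb() -> smp_wmb() conversions above: sb->s_frozen is published to other CPUs, not to a device, so the cheaper smp_wmb() (which compiles away on uniprocessor builds) is the appropriate barrier. Below is a minimal userspace sketch of the same publish/observe pattern, with release/acquire builtins standing in for smp_wmb() and its read-side pairing; every name in it is invented for illustration.

/*
 * Illustrative analogue, not from this patch: the freezer publishes
 * a flag after its data stores, and an observer that sees the flag
 * is guaranteed to also see the data.
 * Build: cc -O2 -pthread freeze_demo.c
 */
#include <pthread.h>
#include <stdio.h>

static int data;	/* stands in for superblock state */
static int frozen;	/* stands in for sb->s_frozen */

static void *freezer(void *unused)
{
	data = 1;					/* data store... */
	__atomic_store_n(&frozen, 1, __ATOMIC_RELEASE);	/* ...then the flag */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, freezer, NULL);
	while (!__atomic_load_n(&frozen, __ATOMIC_ACQUIRE))
		;	/* acquire pairs with the release store above */
	printf("data = %d\n", data);	/* guaranteed to print 1 */
	pthread_join(t, NULL);
	return 0;
}
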
@@ -774,15 +774,14 @@ repeat:
 /**
  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
  *                        buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
  * that I/O.
  *
- * Basically, this is a convenience function for fsync(). @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
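
For context, here is a hedged sketch of the typical caller the corrected comment describes: a filesystem's ->fsync writing back the per-inode "associated" blockdev buffers. Only sync_mapping_buffers() itself comes from this file; everything named "examplefs", and the surrounding shape, is illustrative (using the 2.6-era ->fsync signature).

/* Hypothetical ->fsync; the helper call is real, the rest is made up. */
static int examplefs_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	struct inode *inode = dentry->d_inode;

	/*
	 * Write out and wait upon the buffers hanging off i_mapping's
	 * private_list (e.g. indirect blocks living on the blockdev).
	 */
	return sync_mapping_buffers(inode->i_mapping);
}
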
@@ -1211,7 +1210,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 	return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
 	/* Size must be multiple of hard sectorsize */
@@ -1263,6 +1262,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
 /**
  * mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
  *
  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
  * backing page dirty, then tag the page as dirty in its address_space's radix
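
The new @bh line completes the kerneldoc for mark_buffer_dirty(). As a quick illustration of the call it documents, a hedged sketch of a metadata update follows; "disk_sb" and the copy itself are invented, only the buffer calls are real API.

/*
 * Sketch only: modify a metadata block in-core, then tell the VM
 * it needs writeout.
 */
lock_buffer(bh);
memcpy(bh->b_data, disk_sb, sizeof(*disk_sb));	/* update block contents */
unlock_buffer(bh);
mark_buffer_dirty(bh);	/* dirties the bh, its page, and the radix-tree tag */
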
@@ -1501,6 +1501,7 @@ EXPORT_SYMBOL(__breadahead);
 
 /**
  * __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
  * @block: number of block
  * @size: size (in bytes) to read
 *
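
Likewise, the added @bdev line completes the __bread() kerneldoc. A short usage sketch (the block number and size are arbitrary choices for illustration):

/*
 * Read one 512-byte block synchronously; __bread() returns NULL on
 * I/O error.  The caller must drop the reference with brelse().
 */
struct buffer_head *bh = __bread(bdev, 0, 512);
if (!bh)
	return -EIO;
/* ... inspect bh->b_data ... */
brelse(bh);
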
@@ -1808,7 +1809,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	} while (bh != head);
 
 	do {
-		get_bh(bh);
 		if (!buffer_mapped(bh))
 			continue;
 		/*
@@ -1837,7 +1837,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	 */
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
-	unlock_page(page);
 
 	do {
 		struct buffer_head *next = bh->b_this_page;
@@ -1845,9 +1844,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
+	unlock_page(page);
 
 	err = 0;
 done:
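
The net effect of the three __block_write_full_page() hunks above: the page is now unlocked only after the last submit_bh(), so the page lock itself keeps the buffer ring stable during submission and the per-buffer get_bh()/put_bh() pinning can be dropped (the recovery path below gets the same treatment). A paraphrased shape of the resulting loop, not the literal surrounding code:

do {
	struct buffer_head *next = bh->b_this_page;	/* fetch before submit */
	if (buffer_async_write(bh)) {
		submit_bh(WRITE, bh);
		nr_underway++;
	}
	bh = next;
} while (bh != head);
unlock_page(page);	/* only now may the page (and its buffers) go away */
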
@@ -1886,7 +1885,6 @@ recover:
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		get_bh(bh);
 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
@@ -1909,7 +1907,6 @@ recover:
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
 	goto done;
@@ -1952,7 +1949,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (!buffer_mapped(bh)) {
 			err = get_block(inode, block, bh, 1);
 			if (err)
-				goto out;
+				break;
 			if (buffer_new(bh)) {
 				clear_buffer_new(bh);
 				unmap_underlying_metadata(bh->b_bdev,
@@ -1994,10 +1991,12 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 	while(wait_bh > wait) {
 		wait_on_buffer(*--wait_bh);
 		if (!buffer_uptodate(*wait_bh))
-			return -EIO;
+			err = -EIO;
 	}
-	return 0;
-out:
+	if (!err)
+		return err;
+
+	/* Error case: */
 	/*
 	 * Zero out any newly allocated blocks to avoid exposing stale
 	 * data. If BH_New is set, we know that the block was newly
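
The switch from "goto out" to "break" in the two __block_prepare_write() hunks above means an error from get_block() no longer bypasses the wait loop: reads already submitted against other buffers are drained before the function zeroes new buffers and returns. A hedged sketch of the general pattern, with all helper names invented for illustration:

/* General shape of the fix: never return with async ops in flight. */
int err = 0;
for (i = 0; i < nr; i++) {
	err = start_async_op(op[i]);	/* e.g. get_block + read submission */
	if (err)
		break;			/* stop issuing, but don't return yet */
}
for (i = 0; i < submitted; i++)
	wait_for_op(op[i]);		/* drain everything already started */
if (err)
	cleanup_partial_state();	/* zero out new blocks, etc. */
return err;
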
@@ -2078,8 +2077,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	int nr, i;
 	int fully_mapped = 1;
 
-	if (!PageLocked(page))
-		PAGE_BUG(page);
+	BUG_ON(!PageLocked(page));
 	blocksize = 1 << inode->i_blkbits;
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
@@ -2917,7 +2915,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 
 	bh = head;
 	do {
-		if (buffer_write_io_error(bh))
+		if (buffer_write_io_error(bh) && page->mapping)
 			set_bit(AS_EIO, &page->mapping->flags);
 		if (buffer_busy(bh))
 			goto failed;
@@ -3115,7 +3113,7 @@ void __init buffer_init(void)
 
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
-			SLAB_PANIC, init_buffer_head, NULL);
+			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
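
Adding SLAB_RECLAIM_ACCOUNT makes the allocator account buffer_head pages as reclaimable slab, so they show up in the reclaimable statistics instead of looking like pinned kernel memory. A hedged sketch of the same pattern for a hypothetical cache, using the 2.6-era six-argument kmem_cache_create(); every "example" name is invented.

/* Illustrative only: a reclaim-accounted cache for made-up objects. */
static kmem_cache_t *example_cachep;

void __init example_init(void)
{
	example_cachep = kmem_cache_create("example_objs",
			sizeof(struct example_obj), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,	/* reclaimable; panic on failure */
			init_example_obj, NULL);		/* ctor, no dtor */
}
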