path: root/fs/buffer.c
author		Ingo Molnar <mingo@elte.hu>	2008-07-15 17:12:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 17:12:58 -0400
commit		1e09481365ce248dbb4eb06dad70129bb5807037 (patch)
tree		c0cff5bef95c8b5e7486f144718ade9a06c284dc /fs/buffer.c
parent		3e2f69fdd1b00166e7d589bce56b2d36a9e74374 (diff)
parent		b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)

Merge branch 'linus' into core/softlockup

Conflicts:
	kernel/softlockup.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index a073f3f4f013..5fa1512cd9a2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -821,7 +821,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			 * contents - it is a noop if I/O is still in
 			 * flight on potentially older contents.
 			 */
-			ll_rw_block(SWRITE, 1, &bh);
+			ll_rw_block(SWRITE_SYNC, 1, &bh);
 			brelse(bh);
 			spin_lock(lock);
 		}
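
Note: SWRITE_SYNC behaves like SWRITE in that ll_rw_block() takes the buffer lock unconditionally instead of skipping an already-locked buffer, but the I/O is submitted as WRITE_SYNC so the block layer knows a caller is about to wait on it. A minimal sketch of the calling pattern this enables; the helper name and error handling are illustrative, not part of this commit:

	#include <linux/buffer_head.h>
	#include <linux/errno.h>

	/* Illustrative helper: flush one dirty buffer and wait for it,
	 * hinting to the I/O scheduler that the write is synchronous. */
	static int flush_bh_sync(struct buffer_head *bh)
	{
		ll_rw_block(SWRITE_SYNC, 1, &bh);	/* locks bh, submits WRITE_SYNC */
		wait_on_buffer(bh);			/* sleep until the I/O completes */
		return buffer_uptodate(bh) ? 0 : -EIO;
	}
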
@@ -1691,11 +1691,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		 */
 		clear_buffer_dirty(bh);
 		set_buffer_uptodate(bh);
-	} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+	} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+		   buffer_dirty(bh)) {
 		WARN_ON(bh->b_size != blocksize);
 		err = get_block(inode, block, bh, 1);
 		if (err)
 			goto recover;
+		clear_buffer_delay(bh);
 		if (buffer_new(bh)) {
 			/* blockdev mappings never come here */
 			clear_buffer_new(bh);
@@ -1774,7 +1776,8 @@ recover:
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		if (buffer_mapped(bh) && buffer_dirty(bh)) {
+		if (buffer_mapped(bh) && buffer_dirty(bh) &&
+		    !buffer_delay(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
 		} else {
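
Note: the two hunks above teach writeback about delayed allocation. A buffer carrying BH_Delay has space reserved but no on-disk mapping yet, so __block_write_full_page() must call get_block() at writeback time to allocate a real block, after which the flag is cleared; the recovery path skips such buffers because, with no mapping, there is nothing to submit. A hedged sketch of that state logic, assuming the standard buffer_head state helpers (the function name is illustrative):

	#include <linux/buffer_head.h>

	/* Illustrative: give a dirty, delayed-allocate buffer a real mapping. */
	static int map_delayed_bh(struct inode *inode, sector_t block,
				  struct buffer_head *bh, get_block_t *get_block)
	{
		int err = 0;

		if ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh)) {
			err = get_block(inode, block, bh, 1);	/* allocate on disk */
			if (!err)
				clear_buffer_delay(bh);		/* now truly mapped */
		}
		return err;
	}
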
@@ -2061,6 +2064,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 			struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
+	int i_size_changed = 0;
 
 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
@@ -2073,12 +2077,21 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 	 */
 	if (pos+copied > inode->i_size) {
 		i_size_write(inode, pos+copied);
-		mark_inode_dirty(inode);
+		i_size_changed = 1;
 	}
 
 	unlock_page(page);
 	page_cache_release(page);
 
+	/*
+	 * Don't mark the inode dirty under page lock. First, it unnecessarily
+	 * makes the holding time of page lock longer. Second, it forces lock
+	 * ordering of page lock and transaction start for journaling
+	 * filesystems.
+	 */
+	if (i_size_changed)
+		mark_inode_dirty(inode);
+
 	return copied;
 }
 EXPORT_SYMBOL(generic_write_end);
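
Note: the i_size_changed flag exists so that mark_inode_dirty() runs after unlock_page(). As the new comment says, dirtying the inode can start a journal transaction on journaling filesystems, and doing that while the page lock is held would both lengthen the lock hold time and force a page-lock-before-transaction ordering on every such filesystem; recording the size change and dirtying the inode afterwards avoids both.
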
@@ -2940,16 +2953,19 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE)
+		if (rw == SWRITE || rw == SWRITE_SYNC)
 			lock_buffer(bh);
 		else if (test_set_buffer_locked(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(WRITE, bh);
+				if (rw == SWRITE_SYNC)
+					submit_bh(WRITE_SYNC, bh);
+				else
+					submit_bh(WRITE, bh);
 				continue;
 			}
 		} else {
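
Note: unlike plain WRITE, which skips any buffer it cannot lock immediately (the test_set_buffer_locked() path above), both SWRITE and SWRITE_SYNC wait for the buffer lock, so every dirty buffer in the array gets queued; SWRITE_SYNC additionally submits them as WRITE_SYNC. A brief usage sketch under those assumptions (the wrapper name is illustrative):

	#include <linux/buffer_head.h>

	/* Illustrative: queue an array of possibly-busy dirty buffers as
	 * synchronous writes, then wait for all of them to complete. */
	static void write_and_wait(struct buffer_head *bhs[], int nr)
	{
		int i;

		ll_rw_block(SWRITE_SYNC, nr, bhs);	/* queue all dirty buffers */
		for (i = 0; i < nr; i++)
			wait_on_buffer(bhs[i]);		/* wait for each I/O */
	}
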
@@ -2978,7 +2994,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
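
Note: sync_dirty_buffer() waits on the buffer right after submission (wait_on_buffer() follows submit_bh()), so tagging the request WRITE_SYNC costs nothing and lets the I/O scheduler treat it like other synchronous I/O. An illustrative caller in the shape filesystems use for metadata blocks; the function name is hypothetical:

	#include <linux/buffer_head.h>

	/* Illustrative: dirty a metadata buffer and write it out synchronously. */
	static int commit_metadata_block(struct buffer_head *bh)
	{
		mark_buffer_dirty(bh);
		return sync_dirty_buffer(bh);	/* locks, submits WRITE_SYNC, waits */
	}
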