Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	37
1 files changed, 24 insertions, 13 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 3ebccf4aa7e3..39ff14403d13 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -627,8 +627,7 @@ repeat:
 }
 
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- * buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
@@ -836,7 +835,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 		smp_mb();
 		if (buffer_dirty(bh)) {
 			list_add(&bh->b_assoc_buffers,
-				 &bh->b_assoc_map->private_list);
+				 &mapping->private_list);
 			bh->b_assoc_map = mapping;
 		}
 		spin_unlock(lock);
@@ -1182,7 +1181,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 void mark_buffer_dirty(struct buffer_head *bh)
 {
 	WARN_ON_ONCE(!buffer_uptodate(bh));
-	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+
+	/*
+	 * Very *carefully* optimize the it-is-already-dirty case.
+	 *
+	 * Don't let the final "is it dirty" escape to before we
+	 * perhaps modified the buffer.
+	 */
+	if (buffer_dirty(bh)) {
+		smp_mb();
+		if (buffer_dirty(bh))
+			return;
+	}
+
+	if (!test_set_buffer_dirty(bh))
 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
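The new fast path above is the classic check / full barrier / re-check idiom: a buffer that already looks dirty is only trusted after smp_mb(), so the early return cannot be reordered ahead of the caller's preceding stores to the buffer contents. Below is a minimal user-space sketch of that idiom (not part of the patch), using C11 atomics in place of the kernel's buffer bit operations; the names fake_buffer and fake_mark_dirty are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a buffer_head: one dirty bit plus some payload. */
struct fake_buffer {
	atomic_bool dirty;
	int data;
};

static void fake_mark_dirty(struct fake_buffer *b)
{
	/*
	 * Fast path: if the buffer already looks dirty, re-check after a
	 * full barrier (the smp_mb() analogue) before trusting it, so the
	 * early return cannot slip ahead of the caller's earlier stores.
	 */
	if (atomic_load_explicit(&b->dirty, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&b->dirty, memory_order_relaxed))
			return;
	}

	/* Slow path: atomic test-and-set, like test_set_buffer_dirty(). */
	if (!atomic_exchange(&b->dirty, true))
		printf("buffer went clean -> dirty, would dirty the page\n");
}

int main(void)
{
	struct fake_buffer b;

	atomic_init(&b.dirty, false);
	b.data = 42;		/* modify the buffer contents ... */
	fake_mark_dirty(&b);	/* ... then publish the dirty bit */
	fake_mark_dirty(&b);	/* second call takes the fast path */
	return 0;
}

As in the kernel change, only the clean-to-dirty transition pays for the atomic test-and-set; repeat callers hit the barrier-protected early return.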
@@ -2565,14 +2577,13 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
 	struct inode *inode = page->mapping->host;
 	struct buffer_head *head = fsdata;
 	struct buffer_head *bh;
+	BUG_ON(fsdata != NULL && page_has_buffers(page));
 
-	if (!PageMappedToDisk(page)) {
-		if (unlikely(copied < len) && !page_has_buffers(page))
-			attach_nobh_buffers(page, head);
-		if (page_has_buffers(page))
-			return generic_write_end(file, mapping, pos, len,
-						copied, page, fsdata);
-	}
+	if (unlikely(copied < len) && !page_has_buffers(page))
+		attach_nobh_buffers(page, head);
+	if (page_has_buffers(page))
+		return generic_write_end(file, mapping, pos, len,
+					copied, page, fsdata);
 
 	SetPageUptodate(page);
 	set_page_dirty(page);
@@ -3214,7 +3225,7 @@ static int buffer_cpu_notify(struct notifier_block *self,
 }
 
 /**
- * bh_uptodate_or_lock: Test whether the buffer is uptodate
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
  * @bh: struct buffer_head
  *
  * Return true if the buffer is up-to-date and false,
@@ -3233,7 +3244,7 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
 EXPORT_SYMBOL(bh_uptodate_or_lock);
 
 /**
- * bh_submit_read: Submit a locked buffer for reading
+ * bh_submit_read - Submit a locked buffer for reading
  * @bh: struct buffer_head
  *
  * Returns zero on success and -EIO on error.