Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 826baf4f04bc..3ebccf4aa7e3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
 	return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
 	smp_mb__before_clear_bit();
 	clear_buffer_locked(bh);
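
This hunk (together with the mark_buffer_dirty() hunk further down) drops the fastcall annotation. On i386 it expanded to a register-argument calling-convention attribute, roughly as follows (quoted from memory of that era's include/asm-i386/linkage.h, so treat the exact form as an assumption):

	#define fastcall	__attribute__((regparm(3)))

Once 32-bit x86 builds passed -mregparm=3 unconditionally, the per-function annotation became a no-op (it was already empty on every other architecture), so it was removed tree-wide.
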
@@ -678,7 +678,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 	} else {
 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
 	}
-	if (list_empty(&bh->b_assoc_buffers)) {
+	if (!bh->b_assoc_map) {
 		spin_lock(&buffer_mapping->private_lock);
 		list_move_tail(&bh->b_assoc_buffers,
 				&mapping->private_list);
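
The one-line change above makes bh->b_assoc_map, rather than the emptiness of bh->b_assoc_buffers, the lockless test for "is this buffer associated with a mapping". The list head stopped being reliable because fsync_buffers_list() (next hunks) splices buffers onto a local tmp list, so b_assoc_buffers can be non-empty while the buffer is off every mapping's private_list. A minimal sketch of the resulting invariant, using a hypothetical helper name that is not in the patch:

	/* Hypothetical helper, not part of the patch: b_assoc_map is set
	 * under private_lock when the buffer is linked into a mapping's
	 * private_list and cleared by __remove_assoc_queue(), so a NULL
	 * test is meaningful even without the lock (paired with the
	 * smp_mb() added below). */
	static inline int bh_on_assoc_list(struct buffer_head *bh)
	{
		return bh->b_assoc_map != NULL;
	}
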
@@ -794,6 +794,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -801,9 +802,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 	spin_lock(lock);
 	while (!list_empty(list)) {
 		bh = BH_ENTRY(list->next);
+		mapping = bh->b_assoc_map;
 		__remove_assoc_queue(bh);
+		/* Avoid race with mark_buffer_dirty_inode() which does
+		 * a lockless check and we rely on seeing the dirty bit */
+		smp_mb();
 		if (buffer_dirty(bh) || buffer_locked(bh)) {
 			list_add(&bh->b_assoc_buffers, &tmp);
+			bh->b_assoc_map = mapping;
 			if (buffer_dirty(bh)) {
 				get_bh(bh);
 				spin_unlock(lock);
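
The smp_mb() added here pairs with the lockless check in mark_buffer_dirty_inode(): the flusher unlinks the buffer (clearing b_assoc_map) before testing the dirty bit, while the marker sets the dirty bit before testing b_assoc_map. With full ordering on both sides, at least one CPU must see the other's store: either the flusher sees the dirty bit and keeps the buffer, or the marker sees b_assoc_map == NULL and re-links the buffer itself. A rough userspace model of that ordering in C11 atomics (toy names, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct toy_bh {
		_Atomic(void *) assoc_map;	/* non-NULL while on a private list */
		atomic_bool dirty;
	};

	/* Marker side, cf. mark_buffer_dirty_inode(): set dirty first, then
	 * do the lockless membership test; true means the caller must
	 * re-link the buffer under the lock. */
	static bool toy_mark_dirty(struct toy_bh *bh)
	{
		atomic_store(&bh->dirty, true);
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_load(&bh->assoc_map) == NULL;
	}

	/* Flusher side, cf. fsync_buffers_list(): unlink first, then test
	 * the dirty bit; the fence models the smp_mb() in the hunk above. */
	static bool toy_flush_sees_dirty(struct toy_bh *bh)
	{
		atomic_store(&bh->assoc_map, NULL);
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_load(&bh->dirty);
	}
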
@@ -822,8 +828,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
 	while (!list_empty(&tmp)) {
 		bh = BH_ENTRY(tmp.prev);
-		list_del_init(&bh->b_assoc_buffers);
 		get_bh(bh);
+		mapping = bh->b_assoc_map;
+		__remove_assoc_queue(bh);
+		/* Avoid race with mark_buffer_dirty_inode() which does
+		 * a lockless check and we rely on seeing the dirty bit */
+		smp_mb();
+		if (buffer_dirty(bh)) {
+			list_add(&bh->b_assoc_buffers,
+				 &bh->b_assoc_map->private_list);
+			bh->b_assoc_map = mapping;
+		}
 		spin_unlock(lock);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh))
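
One caveat in the loop above: by the time of the list_add(), __remove_assoc_queue() has already cleared bh->b_assoc_map (that is what the new !bh->b_assoc_map tests elsewhere in this patch rely on), so &bh->b_assoc_map->private_list computes an offset from a NULL pointer rather than the intended list head. The saved mapping, restored on the very next line, appears to be what was meant, i.e. list_add(&bh->b_assoc_buffers, &mapping->private_list); later kernels pass the saved mapping here instead.
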
@@ -1164,7 +1179,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
 	WARN_ON_ONCE(!buffer_uptodate(bh));
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
@@ -1195,7 +1210,7 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
 	clear_buffer_dirty(bh);
-	if (!list_empty(&bh->b_assoc_buffers)) {
+	if (bh->b_assoc_map) {
 		struct address_space *buffer_mapping = bh->b_page->mapping;
 
 		spin_lock(&buffer_mapping->private_lock);
@@ -1436,6 +1451,7 @@ void invalidate_bh_lrus(void)
 {
 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
+EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
@@ -3021,7 +3037,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	do {
 		struct buffer_head *next = bh->b_this_page;
 
-		if (!list_empty(&bh->b_assoc_buffers))
+		if (bh->b_assoc_map)
 			__remove_assoc_queue(bh);
 		bh = next;
 	} while (bh != head);