Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 11b002e01d6e..6f0bddddcf4a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
 	return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
 	smp_mb__before_clear_bit();
 	clear_buffer_locked(bh);
@@ -1164,7 +1164,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
 	WARN_ON_ONCE(!buffer_uptodate(bh));
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
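
For context, a minimal usage sketch (not part of this commit) of how callers typically pair the helpers touched by the patch. The function name example_fill_buffer and the zero-fill body are hypothetical; lock_buffer(), set_buffer_uptodate(), mark_buffer_dirty() and unlock_buffer() are the existing buffer_head API.

#include <linux/buffer_head.h>
#include <linux/string.h>

/* Hypothetical illustration, not from the patch: a caller usually
 * brackets buffer modifications with lock/unlock and marks the buffer
 * dirty so writeback picks it up later. */
static void example_fill_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);		/* may sleep in __lock_buffer() if contended */
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);	/* buffer contents are now valid */
	mark_buffer_dirty(bh);		/* flag the block for writeback */
	unlock_buffer(bh);		/* clears BH_Lock and wakes waiters */
}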