about summary refs log tree commit diff stats
path: root/fs/buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- fs/buffer.c 25
1 file changed, 11 insertions, 14 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 6fa530256bfd..416a2686ec66 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -40,7 +40,6 @@
40#include <linux/cpu.h> 40#include <linux/cpu.h>
41#include <linux/bitops.h> 41#include <linux/bitops.h>
42#include <linux/mpage.h> 42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44 43
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); 44static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 45
@@ -330,8 +329,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
330 * decide that the page is now completely done. 329 * decide that the page is now completely done.
331 */ 330 */
332 first = page_buffers(page); 331 first = page_buffers(page);
333 local_irq_save(flags); 332 spin_lock_irqsave(&first->b_uptodate_lock, flags);
334 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
335 clear_buffer_async_read(bh); 333 clear_buffer_async_read(bh);
336 unlock_buffer(bh); 334 unlock_buffer(bh);
337 tmp = bh; 335 tmp = bh;
@@ -344,8 +342,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
344 } 342 }
345 tmp = tmp->b_this_page; 343 tmp = tmp->b_this_page;
346 } while (tmp != bh); 344 } while (tmp != bh);
347 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 345 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
348 local_irq_restore(flags);
349 346
350 /* 347 /*
351 * If none of the buffers had errors and they are all 348 * If none of the buffers had errors and they are all
@@ -357,8 +354,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
357 return; 354 return;
358 355
359still_busy: 356still_busy:
360 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 357 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
361 local_irq_restore(flags);
362 return; 358 return;
363} 359}
364 360
@@ -393,8 +389,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
393 } 389 }
394 390
395 first = page_buffers(page); 391 first = page_buffers(page);
396 local_irq_save(flags); 392 spin_lock_irqsave(&first->b_uptodate_lock, flags);
397 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
398 393
399 clear_buffer_async_write(bh); 394 clear_buffer_async_write(bh);
400 unlock_buffer(bh); 395 unlock_buffer(bh);
@@ -406,14 +401,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
406 } 401 }
407 tmp = tmp->b_this_page; 402 tmp = tmp->b_this_page;
408 } 403 }
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 404 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
410 local_irq_restore(flags);
411 end_page_writeback(page); 405 end_page_writeback(page);
412 return; 406 return;
413 407
414still_busy: 408still_busy:
415 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 409 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
416 local_irq_restore(flags);
417 return; 410 return;
418} 411}
419EXPORT_SYMBOL(end_buffer_async_write); 412EXPORT_SYMBOL(end_buffer_async_write);
@@ -1152,7 +1145,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
1152 * inode list. 1145 * inode list.
1153 * 1146 *
1154 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, 1147 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1155 * mapping->tree_lock and the global inode_lock. 1148 * and mapping->tree_lock.
1156 */ 1149 */
1157void mark_buffer_dirty(struct buffer_head *bh) 1150void mark_buffer_dirty(struct buffer_head *bh)
1158{ 1151{
@@ -3268,6 +3261,8 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3268 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 3261 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3269 if (ret) { 3262 if (ret) {
3270 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3263 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3264 spin_lock_init(&ret->b_uptodate_lock);
3265 spin_lock_init(&ret->b_state_lock);
3271 get_cpu_var(bh_accounting).nr++; 3266 get_cpu_var(bh_accounting).nr++;
3272 recalc_bh_state(); 3267 recalc_bh_state();
3273 put_cpu_var(bh_accounting); 3268 put_cpu_var(bh_accounting);
@@ -3279,6 +3274,8 @@ EXPORT_SYMBOL(alloc_buffer_head);
3279void free_buffer_head(struct buffer_head *bh) 3274void free_buffer_head(struct buffer_head *bh)
3280{ 3275{
3281 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3276 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3277 BUG_ON(spin_is_locked(&bh->b_uptodate_lock));
3278 BUG_ON(spin_is_locked(&bh->b_state_lock));
3282 kmem_cache_free(bh_cachep, bh); 3279 kmem_cache_free(bh_cachep, bh);
3283 get_cpu_var(bh_accounting).nr--; 3280 get_cpu_var(bh_accounting).nr--;
3284 recalc_bh_state(); 3281 recalc_bh_state();