Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	66
1 file changed, 38 insertions(+), 28 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 561e63a14966..6cbfceabd95d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -40,6 +40,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
@@ -513,8 +514,8 @@ static void free_more_memory(void)
  */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
+	struct buffer_head *first;
 	struct buffer_head *tmp;
 	struct page *page;
 	int page_uptodate = 1;
@@ -536,7 +537,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	 * two buffer heads end IO at almost the same time and both
 	 * decide that the page is now completely done.
 	 */
-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -549,7 +552,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 
 	/*
 	 * If none of the buffers had errors and they are all
@@ -561,7 +565,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	return;
 
 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }
 
@@ -572,8 +577,8 @@ still_busy:
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
 	char b[BDEVNAME_SIZE];
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
+	struct buffer_head *first;
 	struct buffer_head *tmp;
 	struct page *page;
 
@@ -594,7 +599,10 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 		SetPageError(page);
 	}
 
-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
 	clear_buffer_async_write(bh);
 	unlock_buffer(bh);
 	tmp = bh->b_this_page;
@@ -605,12 +613,14 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	}
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	end_page_writeback(page);
 	return;
 
 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }
 
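The two completion handlers above stop sharing one global page_uptodate_lock spinlock and instead serialize on a per-page lock: bit BH_Uptodate_Lock inside the b_state word of the page's first buffer_head, taken with local interrupts disabled because these handlers run from I/O completion context. Below is a rough userspace model of the bit-spinlock idea, assuming hypothetical names and GCC atomic builtins in place of the kernel's bit_spin_lock(); it is a sketch of the technique, not the kernel implementation.

	#include <stdio.h>
	#include <pthread.h>

	#define MY_LOCK_BIT 0		/* stand-in for BH_Uptodate_Lock */

	static unsigned long b_state;	/* models first->b_state */
	static int counter;		/* state guarded by the lock bit */

	/* Spin until we are the thread that flips the bit from 0 to 1. */
	static void bit_lock(unsigned long *word, int bit)
	{
		while (__atomic_fetch_or(word, 1UL << bit, __ATOMIC_ACQUIRE)
		       & (1UL << bit))
			;	/* busy-wait: someone else holds the bit */
	}

	/* Clear the bit, releasing the lock. */
	static void bit_unlock(unsigned long *word, int bit)
	{
		__atomic_fetch_and(word, ~(1UL << bit), __ATOMIC_RELEASE);
	}

	static void *worker(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 100000; i++) {
			bit_lock(&b_state, MY_LOCK_BIT);
			counter++;	/* critical section */
			bit_unlock(&b_state, MY_LOCK_BIT);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;
		pthread_create(&a, NULL, worker, NULL);
		pthread_create(&b, NULL, worker, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("counter = %d (expect 200000)\n", counter);
		return 0;
	}

The point of embedding the lock in an existing state word is that no extra lock object is needed per page, and two pages completing I/O concurrently no longer contend on the same cache line.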
@@ -908,8 +918,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			 * contents - it is a noop if I/O is still in
 			 * flight on potentially older contents.
 			 */
-			wait_on_buffer(bh);
-			ll_rw_block(WRITE, 1, &bh);
+			ll_rw_block(SWRITE, 1, &bh);
 			brelse(bh);
 			spin_lock(lock);
 		}
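The old sequence here was racy: wait_on_buffer() could return, another user could lock the buffer again, and ll_rw_block(WRITE, ...) silently drops any buffer it cannot trylock, so the latest contents might never be written. SWRITE closes that window by making ll_rw_block() block in lock_buffer(). A userspace analogy of the two locking disciplines, with a pthread mutex standing in for the buffer lock and hypothetical function names:

	#include <pthread.h>

	static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

	/* WRITE-style: skip the buffer if someone else holds the lock --
	 * the caller gets no guarantee its latest data is submitted. */
	void write_style_flush(void)
	{
		if (pthread_mutex_trylock(&buf_lock) != 0)
			return;		/* dropped, like test_set_buffer_locked() */
		/* ... submit I/O ... */
		pthread_mutex_unlock(&buf_lock);
	}

	/* SWRITE-style: block until the lock is ours, so the I/O we
	 * submit is guaranteed to cover the current contents. */
	void swrite_style_flush(void)
	{
		pthread_mutex_lock(&buf_lock);	/* like lock_buffer() */
		/* ... submit I/O ... */
		pthread_mutex_unlock(&buf_lock);
	}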
@@ -2784,21 +2793,22 @@ int submit_bh(int rw, struct buffer_head * bh)
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE. The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
  *
- * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * This function drops any buffer that it cannot get a lock on (with the
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing read request. Further it marks as clean buffers that
+ * are processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2814,11 +2824,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (test_set_buffer_locked(bh))
+		if (rw == SWRITE)
+			lock_buffer(bh);
+		else if (test_set_buffer_locked(bh))
 			continue;
 
 		get_bh(bh);
-		if (rw == WRITE) {
+		if (rw == WRITE || rw == SWRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				submit_bh(WRITE, bh);
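For callers, the resulting idiom for synchronously flushing a possibly-busy buffer looks roughly like the sketch below. It is kernel-style code, not a standalone program; bdev, block, blocksize and err are assumed caller context, and error handling is elided.

	struct buffer_head *bh = __getblk(bdev, block, blocksize);

	ll_rw_block(SWRITE, 1, &bh);	/* sleeps in lock_buffer(), then
					 * submits the current contents */
	wait_on_buffer(bh);		/* wait for the write to finish */
	if (!buffer_uptodate(bh))
		err = -EIO;		/* the write failed */
	brelse(bh);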
@@ -3037,10 +3049,9 @@ struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
 {
 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
-		preempt_disable();
-		__get_cpu_var(bh_accounting).nr++;
+		get_cpu_var(bh_accounting).nr++;
 		recalc_bh_state();
-		preempt_enable();
+		put_cpu_var(bh_accounting);
 	}
 	return ret;
 }
@@ -3050,10 +3061,9 @@ void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
 	kmem_cache_free(bh_cachep, bh);
-	preempt_disable();
-	__get_cpu_var(bh_accounting).nr--;
+	get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
-	preempt_enable();
+	put_cpu_var(bh_accounting);
 }
 EXPORT_SYMBOL(free_buffer_head);
 
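The last two hunks swap the open-coded preempt_disable()/__get_cpu_var()/preempt_enable() triple for get_cpu_var()/put_cpu_var(), which bundle the preemption toggle with the per-CPU access so the two cannot drift apart. The same pattern in isolation, as a kernel-style sketch; my_counter is a made-up per-CPU variable, not part of this patch:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(long, my_counter);

	static void my_counter_inc(void)
	{
		/* get_cpu_var() disables preemption and yields this CPU's
		 * copy, so the task cannot migrate mid-update ... */
		get_cpu_var(my_counter)++;
		put_cpu_var(my_counter);	/* ... re-enables preemption */
	}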