Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  40
1 file changed, 20 insertions, 20 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 6a25d7df89b1..1c62203a4906 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -917,8 +917,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				wait_on_buffer(bh);
-				ll_rw_block(WRITE, 1, &bh);
+				ll_rw_block(SWRITE, 1, &bh);
 				brelse(bh);
 				spin_lock(lock);
 			}
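The removed wait_on_buffer()/ll_rw_block(WRITE) pair and its single-call replacement are contrasted in the sketch below. This is illustrative only: example_flush_bh_old() and example_flush_bh_new() are not real kernel functions, and the caller is assumed to hold a reference on bh, as fsync_buffers_list() does via get_bh().

#include <linux/buffer_head.h>

/* Pre-patch idiom: wait for I/O that may still be in flight on older
 * contents, then issue a plain WRITE.  If the buffer is locked again in
 * the window between the two calls, ll_rw_block() skips it, so the
 * current contents may never be submitted. */
static void example_flush_bh_old(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	ll_rw_block(WRITE, 1, &bh);
}

/* Post-patch idiom: SWRITE makes ll_rw_block() block on the buffer lock
 * itself, so once any in-flight I/O completes the current contents are
 * submitted (if the buffer is still dirty). */
static void example_flush_bh_new(struct buffer_head *bh)
{
	ll_rw_block(SWRITE, 1, &bh);
}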
@@ -2793,21 +2792,22 @@ int submit_bh(int rw, struct buffer_head * bh)
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE. The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing read request. Further it marks as clean buffers that
+ * are processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
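As a usage illustration of the documented interface, here is a minimal sketch of a caller that pushes the current contents of one buffer to disk and checks the result. The helper name is hypothetical; ll_rw_block(), wait_on_buffer() and buffer_uptodate() are existing buffer-layer primitives, and the caller is assumed to already hold a reference on bh.

#include <linux/buffer_head.h>
#include <linux/errno.h>

/* Sketch only: synchronously write out one buffer_head with SWRITE. */
static int example_sync_one_bh(struct buffer_head *bh)
{
	ll_rw_block(SWRITE, 1, &bh);	/* blocks on the buffer lock, then
					 * submits the contents if dirty */
	wait_on_buffer(bh);		/* wait for that write to finish */
	if (!buffer_uptodate(bh))	/* end_buffer_write_sync() clears
					 * uptodate on a failed write */
		return -EIO;
	return 0;
}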
@@ -2823,11 +2823,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (test_set_buffer_locked(bh))
+		if (rw == SWRITE)
+			lock_buffer(bh);
+		else if (test_set_buffer_locked(bh))
 			continue;
 
 		get_bh(bh);
-		if (rw == WRITE) {
+		if (rw == WRITE || rw == SWRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				submit_bh(WRITE, bh);
@@ -3046,10 +3048,9 @@ struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
 {
 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
-		preempt_disable();
-		__get_cpu_var(bh_accounting).nr++;
+		get_cpu_var(bh_accounting).nr++;
 		recalc_bh_state();
-		preempt_enable();
+		put_cpu_var(bh_accounting);
 	}
 	return ret;
 }
@@ -3059,10 +3060,9 @@ void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
 	kmem_cache_free(bh_cachep, bh);
-	preempt_disable();
-	__get_cpu_var(bh_accounting).nr--;
+	get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
-	preempt_enable();
+	put_cpu_var(bh_accounting);
 }
 EXPORT_SYMBOL(free_buffer_head);
 
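The last two hunks replace the open-coded preempt_disable()/__get_cpu_var()/preempt_enable() sequence with get_cpu_var()/put_cpu_var(), which bundle the preemption toggling with the per-CPU access. A minimal sketch of the same pattern on a hypothetical per-CPU counter follows; the variable and function names are illustrative, not from the patch.

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, example_nr);	/* hypothetical counter */

static void example_account(void)
{
	/* get_cpu_var() disables preemption and yields this CPU's copy of
	 * the variable as an lvalue; put_cpu_var() re-enables preemption. */
	get_cpu_var(example_nr)++;
	put_cpu_var(example_nr);
}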
