Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	69
1 file changed, 37 insertions(+), 32 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 50efa339e051..3e7dca279d1c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				spin_unlock(lock);
 				/*
 				 * Ensure any pending I/O completes so that
-				 * ll_rw_block() actually writes the current
-				 * contents - it is a noop if I/O is still in
-				 * flight on potentially older contents.
+				 * write_dirty_buffer() actually writes the
+				 * current contents - it is a noop if I/O is
+				 * still in flight on potentially older
+				 * contents.
 				 */
-				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -2912,13 +2913,6 @@ int submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(buffer_unwritten(bh));
 
 	/*
-	 * Mask in barrier bit for a write (could be either a WRITE or a
-	 * WRITE_SYNC
-	 */
-	if (buffer_ordered(bh) && (rw & WRITE))
-		rw |= WRITE_BARRIER;
-
-	/*
 	 * Only clear out a write error when rewriting
 	 */
 	if (test_set_buffer_req(bh) && (rw & WRITE))
@@ -2956,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE. The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request. Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request. Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2987,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-			lock_buffer(bh);
-		else if (!trylock_buffer(bh))
+		if (!trylock_buffer(bh))
 			continue;
-
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-		    rw == SWRITE_SYNC_PLUG) {
+		if (rw == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				if (rw == SWRITE_SYNC)
-					submit_bh(WRITE_SYNC, bh);
-				else
-					submit_bh(WRITE, bh);
+				submit_bh(WRITE, bh);
 				continue;
 			}
 		} else {
@@ -3016,12 +3002,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+	lock_buffer(bh);
+	if (!test_clear_buffer_dirty(bh)) {
+		unlock_buffer(bh);
+		return;
+	}
+	bh->b_end_io = end_buffer_write_sync;
+	get_bh(bh);
+	submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it. The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
 	int ret = 0;
 
@@ -3030,7 +3029,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(rw, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3042,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	}
 	return ret;
 }
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+	return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
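
A note on caller migration (not part of the patch above): the sketch below shows how a call site that previously used ll_rw_block() with an SWRITE* type to push a buffer's *current* contents to disk would be spelled with the interfaces this change introduces. The helper names example_flush_bh()/example_sync_bh() and the choice between WRITE and WRITE_SYNC are illustrative assumptions, not taken from the commit.

	#include <linux/buffer_head.h>

	/* Hypothetical caller, for illustration only. */
	static void example_flush_bh(struct buffer_head *bh)
	{
		/*
		 * Non-waiting write of the buffer's current contents;
		 * roughly what ll_rw_block(SWRITE, 1, &bh) used to do.
		 * write_dirty_buffer() takes the buffer lock itself and
		 * simply unlocks and returns if the buffer is clean.
		 */
		write_dirty_buffer(bh, WRITE);
	}

	static int example_sync_bh(struct buffer_head *bh)
	{
		/*
		 * Data-integrity write that also waits for completion;
		 * sync_dirty_buffer() is now a thin wrapper around this.
		 */
		return __sync_dirty_buffer(bh, WRITE_SYNC);
	}

Call sites that relied on the synchronous SWRITE variants pass WRITE_SYNC (or WRITE_SYNC_PLUG, as fsync_buffers_list() does above) as the rw argument instead.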