author	Christoph Hellwig <hch@lst.de>	2010-08-11 11:06:24 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-18 01:09:01 -0400
commit	9cb569d601e0b93e01c20a22872270ec663b75f6 (patch)
tree	80b2568fae48018806e82f8884062dae8a5494ae /fs/buffer.c
parent	87e99511ea54510ffb60b98001d108794d5037f8 (diff)
remove SWRITE* I/O types
These flags aren't real I/O types; they just tell ll_rw_block to always
lock the buffer instead of giving up on a failed trylock.

Add a new write_dirty_buffer helper that implements these semantics
and use it from the existing SWRITE* callers instead. Note that the
ll_rw_block code had a bug where it didn't promote WRITE_SYNC_PLUG
properly, which this patch fixes.

In the ufs code, clean up the helper that used to call ll_rw_block so
that it mirrors sync_dirty_buffer, which is the function it implements
for compound buffers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
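
The semantics being removed are easiest to see at a call site. The first
hunk below performs exactly this conversion in fsync_buffers_list();
pulled out of the diff for clarity:

	/* Before: the SWRITE* pseudo-type told ll_rw_block() to block in
	 * lock_buffer() instead of skipping the buffer on a failed trylock.
	 */
	ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

	/* After: the always-lock behaviour lives in the new helper, and
	 * the rw argument carries only a real I/O type again.
	 */
	write_dirty_buffer(bh, WRITE_SYNC_PLUG);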
Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 52 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6c8ad977f3d4..3e7dca279d1c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			spin_unlock(lock);
 			/*
 			 * Ensure any pending I/O completes so that
-			 * ll_rw_block() actually writes the current
-			 * contents - it is a noop if I/O is still in
-			 * flight on potentially older contents.
+			 * write_dirty_buffer() actually writes the
+			 * current contents - it is a noop if I/O is
+			 * still in flight on potentially older
+			 * contents.
 			 */
-			ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+			write_dirty_buffer(bh, WRITE_SYNC_PLUG);
 
 			/*
 			 * Kick off IO for the previous mapping. Note
@@ -2949,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request.  Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request.  Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2980,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-			lock_buffer(bh);
-		else if (!trylock_buffer(bh))
+		if (!trylock_buffer(bh))
 			continue;
-
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-		    rw == SWRITE_SYNC_PLUG) {
+		if (rw == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				if (rw == SWRITE_SYNC)
-					submit_bh(WRITE_SYNC, bh);
-				else
-					submit_bh(WRITE, bh);
+				submit_bh(WRITE, bh);
 				continue;
 			}
 		} else {
@@ -3009,6 +3002,19 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+	lock_buffer(bh);
+	if (!test_clear_buffer_dirty(bh)) {
+		unlock_buffer(bh);
+		return;
+	}
+	bh->b_end_io = end_buffer_write_sync;
+	get_bh(bh);
+	submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it.  The caller must have a ref on
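
The commit message mentions reworking the ufs compound-buffer helper to
mirror sync_dirty_buffer(); that change is outside this file's diff. As a
sketch of the pattern write_dirty_buffer() enables there (hypothetical
function and names, not the actual ufs code): submit all the writes first,
then wait, rather than syncing the buffers one at a time.

	/* Sketch only: batch-sync a group of buffer_heads.
	 * write_dirty_buffer() blocks for the buffer lock and quietly
	 * skips buffers that are no longer dirty; end_buffer_write_sync
	 * unlocks each buffer on I/O completion, so the second loop's
	 * wait_on_buffer() observes it.
	 */
	static void sync_compound_buffers(struct buffer_head *bhs[], int nr)
	{
		int i;

		for (i = 0; i < nr; i++)
			write_dirty_buffer(bhs[i], WRITE);
		for (i = 0; i < nr; i++)
			wait_on_buffer(bhs[i]);
	}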