author		Christoph Hellwig <hch@lst.de>	2010-08-11 11:06:24 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-18 01:09:01 -0400
commit		9cb569d601e0b93e01c20a22872270ec663b75f6 (patch)
tree		80b2568fae48018806e82f8884062dae8a5494ae
parent		87e99511ea54510ffb60b98001d108794d5037f8 (diff)
remove SWRITE* I/O types
These flags aren't real I/O types, but tell ll_rw_block to always lock the
buffer instead of giving up on a failed trylock.

Instead add a new write_dirty_buffer helper that implements this semantic
and use it from the existing SWRITE* callers. Note that the ll_rw_block
code had a bug where it didn't promote WRITE_SYNC_PLUG properly, which
this patch fixes.

In the ufs code clean up the helper that used to call ll_rw_block to
mirror sync_dirty_buffer, which is the function it implements for
compound buffers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
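For illustration, the call-site conversion introduced here looks as follows (a
minimal sketch mirroring the fat_sync_bhs() change further down; the wait loop
was already present there and stays with the caller):

	/* Before: SWRITE made ll_rw_block() lock each buffer unconditionally. */
	ll_rw_block(SWRITE, nr_bhs, bhs);

	/* After: write_dirty_buffer() locks the buffer itself and submits it
	 * only if it is still dirty; callers that need completion still wait
	 * explicitly, as before. */
	for (i = 0; i < nr_bhs; i++)
		write_dirty_buffer(bhs[i], WRITE);

	for (i = 0; i < nr_bhs; i++)
		wait_on_buffer(bhs[i]);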
-rw-r--r--	fs/buffer.c			52
-rw-r--r--	fs/fat/misc.c			4
-rw-r--r--	fs/jbd/checkpoint.c		4
-rw-r--r--	fs/jbd/journal.c		2
-rw-r--r--	fs/jbd/revoke.c			2
-rw-r--r--	fs/jbd2/checkpoint.c		4
-rw-r--r--	fs/jbd2/journal.c		2
-rw-r--r--	fs/jbd2/revoke.c		2
-rw-r--r--	fs/reiserfs/journal.c		2
-rw-r--r--	fs/ufs/balloc.c			24
-rw-r--r--	fs/ufs/ialloc.c			18
-rw-r--r--	fs/ufs/truncate.c		18
-rw-r--r--	fs/ufs/util.c			20
-rw-r--r--	fs/ufs/util.h			3
-rw-r--r--	include/linux/buffer_head.h	1
-rw-r--r--	include/linux/fs.h		9
16 files changed, 73 insertions(+), 94 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6c8ad977f3d4..3e7dca279d1c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				spin_unlock(lock);
 				/*
 				 * Ensure any pending I/O completes so that
-				 * ll_rw_block() actually writes the current
-				 * contents - it is a noop if I/O is still in
-				 * flight on potentially older contents.
+				 * write_dirty_buffer() actually writes the
+				 * current contents - it is a noop if I/O is
+				 * still in flight on potentially older
+				 * contents.
 				 */
-				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -2949,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE. The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request. Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request. Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2980,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-			lock_buffer(bh);
-		else if (!trylock_buffer(bh))
+		if (!trylock_buffer(bh))
 			continue;
-
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-		    rw == SWRITE_SYNC_PLUG) {
+		if (rw == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				if (rw == SWRITE_SYNC)
-					submit_bh(WRITE_SYNC, bh);
-				else
-					submit_bh(WRITE, bh);
+				submit_bh(WRITE, bh);
 				continue;
 			}
 		} else {
@@ -3009,6 +3002,19 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+	lock_buffer(bh);
+	if (!test_clear_buffer_dirty(bh)) {
+		unlock_buffer(bh);
+		return;
+	}
+	bh->b_end_io = end_buffer_write_sync;
+	get_bh(bh);
+	submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it. The caller must have a ref on
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1fa23f6ffba5..1736f2356388 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
 {
 	int i, err = 0;
 
-	ll_rw_block(SWRITE, nr_bhs, bhs);
+	for (i = 0; i < nr_bhs; i++)
+		write_dirty_buffer(bhs[i], WRITE);
+
 	for (i = 0; i < nr_bhs; i++) {
 		wait_on_buffer(bhs[i]);
 		if (buffer_eopnotsupp(bhs[i])) {
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index b0435dd0654d..05a38b9c4c0e 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
 {
 	int i;
 
-	ll_rw_block(SWRITE, *batch_count, bhs);
+	for (i = 0; i < *batch_count; i++)
+		write_dirty_buffer(bhs[i], WRITE);
+
 	for (i = 0; i < *batch_count; i++) {
 		struct buffer_head *bh = bhs[i];
 		clear_buffer_jwrite(bh);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index f19ce94693d8..2c4b1f109da9 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait)
 	if (wait)
 		sync_dirty_buffer(bh);
 	else
-		ll_rw_block(SWRITE, 1, &bh);
+		write_dirty_buffer(bh, WRITE);
 
 out:
 	/* If we have just flushed the log (by marking s_start==0), then
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index ad717328343a..d29018307e2e 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal,
 	set_buffer_jwrite(bh);
 	BUFFER_TRACE(bh, "write");
 	set_buffer_dirty(bh);
-	ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+	write_dirty_buffer(bh, write_op);
 }
 #endif
 
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 1c23a0f4e8a3..5247e7ffdcb4 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count)
 {
 	int i;
 
-	ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
+	for (i = 0; i < *batch_count; i++)
+		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+
 	for (i = 0; i < *batch_count; i++) {
 		struct buffer_head *bh = journal->j_chkpt_bhs[i];
 		clear_buffer_jwrite(bh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ad5866aaf0f9..0e8014ea6b94 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
 			set_buffer_uptodate(bh);
 		}
 	} else
-		ll_rw_block(SWRITE, 1, &bh);
+		write_dirty_buffer(bh, WRITE);
 
 out:
 	/* If we have just flushed the log (by marking s_start==0), then
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index a360b06af2e3..9ad321fd63fd 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -625,7 +625,7 @@ static void flush_descriptor(journal_t *journal,
 	set_buffer_jwrite(bh);
 	BUFFER_TRACE(bh, "write");
 	set_buffer_dirty(bh);
-	ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+	write_dirty_buffer(bh, write_op);
 }
 #endif
 
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 1ec952b1f036..812e2c05aa29 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2311,7 +2311,7 @@ static int journal_read_transaction(struct super_block *sb,
 	/* flush out the real blocks */
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 		set_buffer_dirty(real_blocks[i]);
-		ll_rw_block(SWRITE, 1, real_blocks + i);
+		write_dirty_buffer(real_blocks[i], WRITE);
 	}
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 		wait_on_buffer(real_blocks[i]);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 048484fb10d2..46f7a807bbc1 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -114,10 +114,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	unlock_super (sb);
@@ -207,10 +205,8 @@ do_more:
 
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 
 	if (overflow) {
 		fragment += count;
@@ -558,10 +554,8 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -680,10 +674,8 @@ cg_found:
 succed:
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	result += cgno * uspi->s_fpg;
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 428017e018fe..2eabf04af3de 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -113,10 +113,8 @@ void ufs_free_inode (struct inode * inode)
 
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 
 	sb->s_dirt = 1;
 	unlock_super (sb);
@@ -156,10 +154,8 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
 
 	fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
 	ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer(UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 
 	UFSD("EXIT\n");
 }
@@ -290,10 +286,8 @@ cg_found:
 	}
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	inode->i_ino = cg * uspi->s_ipg + bit;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 34d5cb135320..a58f9155fc9a 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -243,10 +243,8 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
 		ubh_bforget(ind_ubh);
 		ind_ubh = NULL;
 	}
-	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
-		ubh_ll_rw_block(SWRITE, ind_ubh);
-		ubh_wait_on_buffer (ind_ubh);
-	}
+	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
+		ubh_sync_block(ind_ubh);
 	ubh_brelse (ind_ubh);
 
 	UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -307,10 +305,8 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
 		ubh_bforget(dind_bh);
 		dind_bh = NULL;
 	}
-	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
-		ubh_ll_rw_block(SWRITE, dind_bh);
-		ubh_wait_on_buffer (dind_bh);
-	}
+	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
+		ubh_sync_block(dind_bh);
 	ubh_brelse (dind_bh);
 
 	UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -367,10 +363,8 @@ static int ufs_trunc_tindirect(struct inode *inode)
 		ubh_bforget(tind_bh);
 		tind_bh = NULL;
 	}
-	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
-		ubh_ll_rw_block(SWRITE, tind_bh);
-		ubh_wait_on_buffer (tind_bh);
-	}
+	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
+		ubh_sync_block(tind_bh);
 	ubh_brelse (tind_bh);
 
 	UFSD("EXIT: ino %lu\n", inode->i_ino);
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 85a7fc9e4a4e..d2c36d53fe66 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -113,21 +113,17 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
 	}
 }
 
-void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
+void ubh_sync_block(struct ufs_buffer_head *ubh)
 {
-	if (!ubh)
-		return;
+	if (ubh) {
+		unsigned i;
 
-	ll_rw_block(rw, ubh->count, ubh->bh);
-}
+		for (i = 0; i < ubh->count; i++)
+			write_dirty_buffer(ubh->bh[i], WRITE);
 
-void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
-{
-	unsigned i;
-	if (!ubh)
-		return;
-	for ( i = 0; i < ubh->count; i++ )
-		wait_on_buffer (ubh->bh[i]);
+		for (i = 0; i < ubh->count; i++)
+			wait_on_buffer(ubh->bh[i]);
+	}
 }
 
 void ubh_bforget (struct ufs_buffer_head * ubh)
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 0466036912f1..9f8775ce381c 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -269,8 +269,7 @@ extern void ubh_brelse (struct ufs_buffer_head *);
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
 extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
-extern void ubh_ll_rw_block(int, struct ufs_buffer_head *);
-extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern void ubh_sync_block(struct ufs_buffer_head *);
 extern void ubh_bforget (struct ufs_buffer_head *);
 extern int ubh_buffer_dirty (struct ufs_buffer_head *);
 #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 72c1cf83eb85..ec94c12f21da 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -182,6 +182,7 @@ void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a96b4d83fc1..29f7c975304c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -125,9 +125,6 @@ struct inodes_stat_t {
  *			block layer could (in theory) choose to ignore this
  *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
- * SWRITE		Like WRITE, but a special case for ll_rw_block() that
- *			tells it to lock the buffer first. Normally a buffer
- *			must be locked before doing IO.
  * WRITE_SYNC_PLUG	Synchronous write. Identical to WRITE, but passes down
  *			the hint that someone will be waiting on this IO
  *			shortly. The device must still be unplugged explicitly,
@@ -138,9 +135,6 @@ struct inodes_stat_t {
  *			immediately after submission. The write equivalent
  *			of READ_SYNC.
  * WRITE_ODIRECT_PLUG	Special case write for O_DIRECT only.
- * SWRITE_SYNC
- * SWRITE_SYNC_PLUG	Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
- *			See SWRITE.
  * WRITE_BARRIER	Like WRITE_SYNC, but tells the block layer that all
  *			previously submitted writes must be safely on storage
  *			before this one is started. Also guarantees that when
@@ -155,7 +149,6 @@ struct inodes_stat_t {
 #define READ			0
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
-#define SWRITE			(WRITE | READA)
 
 #define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
 #define READ_META		(READ | REQ_META)
@@ -165,8 +158,6 @@ struct inodes_stat_t {
 #define WRITE_META		(WRITE | REQ_META)
 #define WRITE_BARRIER		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
 				 REQ_HARDBARRIER)
-#define SWRITE_SYNC_PLUG	(SWRITE | REQ_SYNC | REQ_NOIDLE)
-#define SWRITE_SYNC		(SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about