about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author Dmitry Monakhov <dmonakhov@openvz.org> 2007-10-16 04:24:47 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-10-16 12:42:53 -0400
commit 68671f35fe8d785277118a333c88768a4f894917 (patch)
tree 50cb5a378db1b021c2ec3573388af3480df49791
parent ef8b4520bd9f8294ffce9abd6158085bde5dc902 (diff)
mm: add end_buffer_read helper function
Move duplicated code from end_buffer_read_XXX methods to a separate helper function.

Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- fs/buffer.c | 32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 75b51dfa5e03..b144fc367b8b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -110,10 +110,14 @@ static void buffer_io_error(struct buffer_head *bh)
110} 110}
111 111
112/* 112/*
113 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 113 * End-of-IO handler helper function which does not touch the bh after
114 * unlock the buffer. This is what ll_rw_block uses too. 114 * unlocking it.
115 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116 * a race there is benign: unlock_buffer() only use the bh's address for
117 * hashing after unlocking the buffer, so it doesn't actually touch the bh
118 * itself.
115 */ 119 */
116void end_buffer_read_sync(struct buffer_head *bh, int uptodate) 120static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
117{ 121{
118 if (uptodate) { 122 if (uptodate) {
119 set_buffer_uptodate(bh); 123 set_buffer_uptodate(bh);
@@ -122,6 +126,15 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
122 clear_buffer_uptodate(bh); 126 clear_buffer_uptodate(bh);
123 } 127 }
124 unlock_buffer(bh); 128 unlock_buffer(bh);
129}
130
131/*
132 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
133 * unlock the buffer. This is what ll_rw_block uses too.
134 */
135void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136{
137 __end_buffer_read_notouch(bh, uptodate);
125 put_bh(bh); 138 put_bh(bh);
126} 139}
127 140
@@ -2245,21 +2258,10 @@ out_unlock:
2245 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed 2258 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2246 * immediately, while under the page lock. So it needs a special end_io 2259 * immediately, while under the page lock. So it needs a special end_io
2247 * handler which does not touch the bh after unlocking it. 2260 * handler which does not touch the bh after unlocking it.
2248 *
2249 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2250 * a race there is benign: unlock_buffer() only use the bh's address for
2251 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2252 * itself.
2253 */ 2261 */
2254static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 2262static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2255{ 2263{
2256 if (uptodate) { 2264 __end_buffer_read_notouch(bh, uptodate);
2257 set_buffer_uptodate(bh);
2258 } else {
2259 /* This happens, due to failed READA attempts. */
2260 clear_buffer_uptodate(bh);
2261 }
2262 unlock_buffer(bh);
2263} 2265}
2264 2266
2265/* 2267/*