author		Zach Brown <zach.brown@oracle.com>	2006-12-10 05:21:01 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-10 12:57:21 -0500
commit		20258b2b397031649e4a41922fe803d57017df84 (patch)
tree		9c5fc96367b4938474cee2789d3c98c5e37586bf /fs
parent		0273201e693fd62381f6b1e85b15ffc117d8a46e (diff)
[PATCH] dio: remove duplicate bio wait code
Now that we have a single refcount and waiting path, we can reuse it in the
async 'should_wait' path. It continues to rely on the fragile link between
the conditional in dio_complete_aio(), which decides to complete the AIO, and
the conditional in direct_io_worker(), which decides to wait and free.
By waiting before dropping the reference we stop dio_bio_end_aio() from
calling dio_complete_aio(), which used to wake up the waiter after seeing the
reference count drop to 0. We hoist this wake-up into dio_bio_end_aio(), which
now notices when it's left a single remaining reference that is held by the
waiter.
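The hunks below show the kernel change itself; purely as a rough illustration of the
refcount/wake-up pattern described above, the same idea can be sketched in ordinary
userspace C, standing in C11 atomics and a pthread condition variable for the kernel's
atomic_t, wake_up_process() and io_schedule(). All names here (struct fake_dio, end_io,
sync_wait_and_put) are made up for the sketch and are not kernel API.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for struct dio; not kernel code. */
	struct fake_dio {
		atomic_int refcount;      /* one ref per in-flight bio + one held by the submitter */
		bool have_waiter;         /* set by the submitter before it decides to wait */
		pthread_mutex_t lock;
		pthread_cond_t wait;
	};

	/* Completion side: the role dio_bio_end_aio() plays after this patch. */
	static void end_io(struct fake_dio *dio)
	{
		int remaining = atomic_fetch_sub(&dio->refcount, 1) - 1;

		if (remaining == 1 && dio->have_waiter) {
			/* Only the waiter's reference is left: wake it up here
			 * instead of letting the completion path do it. */
			pthread_mutex_lock(&dio->lock);
			pthread_cond_signal(&dio->wait);
			pthread_mutex_unlock(&dio->lock);
		} else if (remaining == 0) {
			/* Last reference was an I/O reference: complete the
			 * async request and free (the dio_complete_aio() case). */
			printf("async completion frees the request\n");
			free(dio);
		}
	}

	/* Submitter side: the reworked tail of direct_io_worker(), where the
	 * wait happens *before* the submitter drops its own reference, so the
	 * async-completion branch above can never run underneath it. */
	static void sync_wait_and_put(struct fake_dio *dio)
	{
		pthread_mutex_lock(&dio->lock);
		while (atomic_load(&dio->refcount) > 1)   /* > 1: bios still in flight */
			pthread_cond_wait(&dio->wait, &dio->lock);
		pthread_mutex_unlock(&dio->lock);

		/* All I/O references are gone; drop ours and free the request. */
		if (atomic_fetch_sub(&dio->refcount, 1) == 1) {
			printf("submitter frees the request\n");
			free(dio);
		}
	}

	int main(void)
	{
		struct fake_dio *dio = malloc(sizeof(*dio));

		atomic_init(&dio->refcount, 2);   /* one bio + the submitter */
		dio->have_waiter = true;
		pthread_mutex_init(&dio->lock, NULL);
		pthread_cond_init(&dio->wait, NULL);

		end_io(dio);                      /* the lone bio completes... */
		sync_wait_and_put(dio);           /* ...and the submitter cleans up */
		return 0;
	}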
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Suparna Bhattacharya <suparna@in.ibm.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r--	fs/direct-io.c	41
1 file changed, 12 insertions, 29 deletions
diff --git a/fs/direct-io.c b/fs/direct-io.c
index bc1cbf9149f7..f11f05dc9e61 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -257,7 +257,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
  */
 static void dio_complete_aio(struct dio *dio)
 {
-	unsigned long flags;
 	int ret;
 
 	ret = dio_complete(dio, dio->iocb->ki_pos, 0);
@@ -267,14 +266,6 @@ static void dio_complete_aio(struct dio *dio)
 		((dio->rw == READ) && dio->result)) {
 		aio_complete(dio->iocb, ret, 0);
 		kfree(dio);
-	} else {
-		/*
-		 * Falling back to buffered
-		 */
-		spin_lock_irqsave(&dio->bio_lock, flags);
-		if (dio->waiter)
-			wake_up_process(dio->waiter);
-		spin_unlock_irqrestore(&dio->bio_lock, flags);
 	}
 }
 
@@ -285,6 +276,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio);
 static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
 {
 	struct dio *dio = bio->bi_private;
+	int waiter_holds_ref = 0;
+	int remaining;
 
 	if (bio->bi_size)
 		return 1;
@@ -292,7 +285,12 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
 	/* cleanup the bio */
 	dio_bio_complete(dio, bio);
 
-	if (atomic_dec_and_test(&dio->refcount))
+	waiter_holds_ref = !!dio->waiter;
+	remaining = atomic_sub_return(1, (&dio->refcount));
+	if (remaining == 1 && waiter_holds_ref)
+		wake_up_process(dio->waiter);
+
+	if (remaining == 0)
 		dio_complete_aio(dio);
 
 	return 0;
@@ -1097,30 +1095,15 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 		if (ret == 0)
 			ret = dio->result;
 
+		if (should_wait)
+			dio_await_completion(dio);
+
 		/* this can free the dio */
 		if (atomic_dec_and_test(&dio->refcount))
 			dio_complete_aio(dio);
 
-		if (should_wait) {
-			unsigned long flags;
-			/*
-			 * Wait for already issued I/O to drain out and
-			 * release its references to user-space pages
-			 * before returning to fallback on buffered I/O
-			 */
-
-			spin_lock_irqsave(&dio->bio_lock, flags);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			while (atomic_read(&dio->refcount)) {
-				spin_unlock_irqrestore(&dio->bio_lock, flags);
-				io_schedule();
-				spin_lock_irqsave(&dio->bio_lock, flags);
-				set_current_state(TASK_UNINTERRUPTIBLE);
-			}
-			spin_unlock_irqrestore(&dio->bio_lock, flags);
-			set_current_state(TASK_RUNNING);
+		if (should_wait)
 			kfree(dio);
-		}
 	} else {
 		dio_await_completion(dio);
 