author	Christoph Hellwig <hch@infradead.org>	2010-07-18 17:17:09 -0400
committer	Alex Elder <aelder@sgi.com>	2010-07-26 17:09:02 -0400
commit	40e2e97316af6e62affab7a392e792494b8d9dde
tree	981ce3b464467893683d47f52ae5d35fdd32d46a /fs/direct-io.c
parent	696123fca877905696591829c97a2cef11c8d048
direct-io: move aio_complete into ->end_io
Filesystems with unwritten extent support must not complete an AIO request
until the transaction to convert the extent has been committed. That means
the aio_complete call needs to be moved into the ->end_io callback so
that the filesystem can control exactly when it is called.
This makes dio_complete a bit of a mess and the ->end_io callback
prototype even more complicated.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Alex Elder <aelder@sgi.com>
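To illustrate the resulting convention (this sketch is not part of the commit; the
my_fs_* names are hypothetical): a filesystem with unwritten extent support would
now convert the extents from its ->end_io handler and, for AIO, call aio_complete
itself once the conversion is done, instead of the generic direct-io code completing
the iocb too early.

#include <linux/fs.h>
#include <linux/aio.h>

/* Hypothetical helper that converts unwritten extents covered by the write. */
static void my_fs_convert_unwritten(void *private, loff_t offset, ssize_t size);

/* Sketch of an ->end_io handler under the new prototype. */
static void my_fs_end_io(struct kiocb *iocb, loff_t offset, ssize_t size,
			 void *private, int ret, bool is_async)
{
	/*
	 * Convert unwritten extents before completion is signalled.  A real
	 * filesystem would typically defer this to a workqueue or tie it to
	 * a transaction commit; it is inline here only to keep the sketch
	 * short.
	 */
	if (ret > 0)
		my_fs_convert_unwritten(private, offset, size);

	/*
	 * With this change the generic code no longer calls aio_complete
	 * when an ->end_io handler is present, so the AIO case must be
	 * completed here; synchronous direct I/O still returns through
	 * dio_complete()'s return value.
	 */
	if (is_async)
		aio_complete(iocb, ret, 0);
}

Passing is_async down is what lets the handler tell the two cases apart.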
Diffstat (limited to 'fs/direct-io.c')
-rw-r--r--	fs/direct-io.c	26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 7600aacf531d..a10cb91cadea 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static int dio_complete(struct dio *dio, loff_t offset, int ret)
+static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
 {
 	ssize_t transferred = 0;
 
@@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
 		transferred = dio->i_size - offset;
 	}
 
-	if (dio->end_io && dio->result)
-		dio->end_io(dio->iocb, offset, transferred,
-			    dio->map_bh.b_private);
-
-	if (dio->flags & DIO_LOCKING)
-		/* lockdep: non-owner release */
-		up_read_non_owner(&dio->inode->i_alloc_sem);
-
 	if (ret == 0)
 		ret = dio->page_errors;
 	if (ret == 0)
@@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
 	if (ret == 0)
 		ret = transferred;
 
+	if (dio->end_io && dio->result) {
+		dio->end_io(dio->iocb, offset, transferred,
+			    dio->map_bh.b_private, ret, is_async);
+	} else if (is_async) {
+		aio_complete(dio->iocb, ret, 0);
+	}
+
+	if (dio->flags & DIO_LOCKING)
+		/* lockdep: non-owner release */
+		up_read_non_owner(&dio->inode->i_alloc_sem);
+
 	return ret;
 }
 
@@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (remaining == 0) {
-		int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
-		aio_complete(dio->iocb, ret, 0);
+		dio_complete(dio, dio->iocb->ki_pos, 0, true);
 		kfree(dio);
 	}
 }
@@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (ret2 == 0) {
-		ret = dio_complete(dio, offset, ret);
+		ret = dio_complete(dio, offset, ret, false);
 		kfree(dio);
 	} else
 		BUG_ON(ret != -EIOCBQUEUED);