about summary refs log tree commit diff stats
path: root/fs/xfs/xfs_aops.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@infradead.org>2011-08-23 04:28:10 -0400
committerAlex Elder <aelder@sgi.com>2011-10-11 22:15:00 -0400
commitc859cdd1da008b3825555be3242908088a3de366 (patch)
tree0d5c5d22d1d10a346da81990a3c94bfabfd03904 /fs/xfs/xfs_aops.c
parent398d25ef23b10ce75424e0336a8d059dda1dbc8d (diff)
xfs: defer AIO/DIO completions
We really shouldn't complete AIO or DIO requests until we have finished the unwritten extent conversion and size update. This means fsync never has to pick up any ioends as all work has been completed when signalling I/O completion. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_aops.c')
-rw-r--r--fs/xfs/xfs_aops.c26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d91564404abf..10660c364105 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -122,6 +122,11 @@ xfs_destroy_ioend(
122 bh->b_end_io(bh, !ioend->io_error); 122 bh->b_end_io(bh, !ioend->io_error);
123 } 123 }
124 124
125 if (ioend->io_iocb) {
126 if (ioend->io_isasync)
127 aio_complete(ioend->io_iocb, ioend->io_result, 0);
128 inode_dio_done(ioend->io_inode);
129 }
125 xfs_ioend_wake(ip); 130 xfs_ioend_wake(ip);
126 mempool_free(ioend, xfs_ioend_pool); 131 mempool_free(ioend, xfs_ioend_pool);
127} 132}
@@ -236,8 +241,6 @@ xfs_end_io(
236 /* ensure we don't spin on blocked ioends */ 241 /* ensure we don't spin on blocked ioends */
237 delay(1); 242 delay(1);
238 } else { 243 } else {
239 if (ioend->io_iocb)
240 aio_complete(ioend->io_iocb, ioend->io_result, 0);
241 xfs_destroy_ioend(ioend); 244 xfs_destroy_ioend(ioend);
242 } 245 }
243} 246}
@@ -274,6 +277,7 @@ xfs_alloc_ioend(
274 * all the I/O from calling the completion routine too early. 277 * all the I/O from calling the completion routine too early.
275 */ 278 */
276 atomic_set(&ioend->io_remaining, 1); 279 atomic_set(&ioend->io_remaining, 1);
280 ioend->io_isasync = 0;
277 ioend->io_error = 0; 281 ioend->io_error = 0;
278 ioend->io_list = NULL; 282 ioend->io_list = NULL;
279 ioend->io_type = type; 283 ioend->io_type = type;
@@ -1289,7 +1293,6 @@ xfs_end_io_direct_write(
1289 bool is_async) 1293 bool is_async)
1290{ 1294{
1291 struct xfs_ioend *ioend = iocb->private; 1295 struct xfs_ioend *ioend = iocb->private;
1292 struct inode *inode = ioend->io_inode;
1293 1296
1294 /* 1297 /*
1295 * blockdev_direct_IO can return an error even after the I/O 1298 * blockdev_direct_IO can return an error even after the I/O
@@ -1300,28 +1303,17 @@ xfs_end_io_direct_write(
1300 1303
1301 ioend->io_offset = offset; 1304 ioend->io_offset = offset;
1302 ioend->io_size = size; 1305 ioend->io_size = size;
1306 ioend->io_iocb = iocb;
1307 ioend->io_result = ret;
1303 if (private && size > 0) 1308 if (private && size > 0)
1304 ioend->io_type = IO_UNWRITTEN; 1309 ioend->io_type = IO_UNWRITTEN;
1305 1310
1306 if (is_async) { 1311 if (is_async) {
1307 /* 1312 ioend->io_isasync = 1;
1308 * If we are converting an unwritten extent we need to delay
1309 * the AIO completion until after the unwrittent extent
1310 * conversion has completed, otherwise do it ASAP.
1311 */
1312 if (ioend->io_type == IO_UNWRITTEN) {
1313 ioend->io_iocb = iocb;
1314 ioend->io_result = ret;
1315 } else {
1316 aio_complete(iocb, ret, 0);
1317 }
1318 xfs_finish_ioend(ioend); 1313 xfs_finish_ioend(ioend);
1319 } else { 1314 } else {
1320 xfs_finish_ioend_sync(ioend); 1315 xfs_finish_ioend_sync(ioend);
1321 } 1316 }
1322
1323 /* XXX: probably should move into the real I/O completion handler */
1324 inode_dio_done(inode);
1325} 1317}
1326 1318
1327STATIC ssize_t 1319STATIC ssize_t