author		Christoph Hellwig <hch@infradead.org>	2010-07-18 17:17:10 -0400
committer	Alex Elder <aelder@sgi.com>		2010-07-26 17:09:10 -0400
commit		fb511f2150174b18b28ad54708c1adda0df39b17
tree		2642c10c35a8dcbf7aa1e46115a5b48555aa258b /fs/xfs
parent		40e2e97316af6e62affab7a392e792494b8d9dde
xfs: move aio completion after unwritten extent conversion
If we write into an unwritten extent using AIO we need to complete the AIO
request after the extent conversion has finished. Without that a read could
race to see the extent still unwritten and return zeros. For synchronous
I/O we already take care of that by flushing the xfsconvertd workqueue (which
might be a bit of overkill).
To do that add iocb and result fields to struct xfs_ioend, so that we can
call aio_complete from xfs_end_io after the extent conversion has happened.
Note that we need a new result field as io_error is used for positive errno
values, while the AIO code can return negative error values and positive
transfer sizes.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
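
The io_iocb and io_result fields that the hunks below rely on are added to struct xfs_ioend in fs/xfs/linux-2.6/xfs_aops.h, which falls outside the diffstat shown here. As a rough sketch only (the wrapper struct name is illustrative and the exact declarations in the header may differ), they amount to:

/*
 * Sketch only -- not the patch text.  Illustrates the two fields the
 * xfs_aops.c hunks reference; types are inferred from how they are used.
 */
struct xfs_ioend_aio_fields {		/* illustrative name, not in the tree */
	struct kiocb	*io_iocb;	/* AIO request to complete after unwritten conversion */
	int		io_result;	/* signed AIO result: bytes transferred or negative errno */
};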
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	| 19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 95d1e2695c3a..13622d5ba068 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -265,8 +265,11 @@ xfs_end_io(
 		xfs_finish_ioend(ioend, 0);
 		/* ensure we don't spin on blocked ioends */
 		delay(1);
-	} else
+	} else {
+		if (ioend->io_iocb)
+			aio_complete(ioend->io_iocb, ioend->io_result, 0);
 		xfs_destroy_ioend(ioend);
+	}
 }
 
 /*
@@ -299,6 +302,8 @@ xfs_alloc_ioend(
 	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
+	ioend->io_iocb = NULL;
+	ioend->io_result = 0;
 
 	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
@@ -1411,6 +1416,7 @@ xfs_end_io_direct(
 	bool			is_async)
 {
 	xfs_ioend_t		*ioend = iocb->private;
+	bool			complete_aio = is_async;
 
 	/*
 	 * Non-NULL private data means we need to issue a transaction to
@@ -1436,7 +1442,14 @@ xfs_end_io_direct(
 	if (ioend->io_type == IO_READ) {
 		xfs_finish_ioend(ioend, 0);
 	} else if (private && size > 0) {
-		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
+		if (is_async) {
+			ioend->io_iocb = iocb;
+			ioend->io_result = ret;
+			complete_aio = false;
+			xfs_finish_ioend(ioend, 0);
+		} else {
+			xfs_finish_ioend(ioend, 1);
+		}
 	} else {
 		/*
 		 * A direct I/O write ioend starts it's life in unwritten
@@ -1455,7 +1468,7 @@ xfs_end_io_direct(
 	 */
 	iocb->private = NULL;
 
-	if (is_async)
+	if (complete_aio)
 		aio_complete(iocb, ret, 0);
 }
 
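
Read together, the hunks implement a simple hand-off: xfs_end_io_direct() stashes the iocb and the AIO result on the ioend instead of completing immediately, and xfs_end_io() calls aio_complete() only once the unwritten extent conversion has run. A minimal user-space model of that pattern (all model_* names are purely illustrative and do not exist in the kernel tree) might look like:

/* Minimal user-space model of the deferred AIO completion pattern above. */
#include <stdio.h>

struct model_iocb {
	/* stands in for aio_complete() being invoked on the request */
	void (*complete)(struct model_iocb *iocb, long res);
};

struct model_ioend {
	int			io_error;	/* positive errno, as in XFS */
	struct model_iocb	*io_iocb;	/* set only for async direct writes */
	long			io_result;	/* signed result, kept separate from io_error */
};

/* stands in for xfs_iomap_write_unwritten(): conversion must happen first */
static void model_convert_unwritten(struct model_ioend *ioend)
{
	(void)ioend;
	printf("unwritten extent converted\n");
}

/* analogous to xfs_end_io(): signal the AIO request only after conversion */
static void model_end_io(struct model_ioend *ioend)
{
	model_convert_unwritten(ioend);
	if (ioend->io_iocb)
		ioend->io_iocb->complete(ioend->io_iocb, ioend->io_result);
}

static void report(struct model_iocb *iocb, long res)
{
	(void)iocb;
	printf("aio completion delivered, res=%ld\n", res);
}

int main(void)
{
	struct model_iocb iocb = { .complete = report };
	/* like xfs_end_io_direct() in the async case: stash the iocb and result */
	struct model_ioend ioend = { .io_iocb = &iocb, .io_result = 4096 };

	model_end_io(&ioend);
	return 0;
}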