author	Lukas Czerner <lczerner@redhat.com>	2017-09-21 10:16:29 -0400
committer	Jens Axboe <axboe@kernel.dk>	2017-09-25 10:56:05 -0400
commit	332391a9935da939319e473b4680e173df75afcf (patch)
tree	52609917ecaadeea19dab63feaa4229af5a88561 /fs/iomap.c
parent	bb1cc74790eb51f52d23c6e5fd9a3bb16030c3d8 (diff)
fs: Fix page cache inconsistency when mixing buffered and AIO DIO
Currently, when mixing buffered reads and asynchronous direct writes it is possible to end up with stale data in the page cache while the new data is already written to disk. This is permanent until the affected pages are flushed away. Despite the fact that mixing buffered and direct IO is ill-advised, this does pose a threat to data integrity, is unexpected, and should be fixed.

Fix this by deferring completion of asynchronous direct writes to a process context in the case that there are mapped pages to be found in the inode. Later, before the completion in dio_complete(), invalidate the pages in question. This ensures that after completion the pages in the written area are either unmapped, or populated with up-to-date data. Also do the same for the iomap case, which uses iomap_dio_complete() instead.

This has the side effect of deferring the completion to a process context for every AIO DIO that happens on an inode that has pages mapped. However, since the consensus is that this is ill-advised practice, the performance implication should not be a problem.

This was based on a proposal from Jeff Moyer, thanks!

Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
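For context, the deferral described above happens at AIO completion time: if the inode being written still has pages in its page cache, completion is punted to a workqueue so that the potentially blocking call to invalidate_inode_pages2_range() runs in process context rather than from the bio end_io path. The fs/direct-io.c side of that change is not part of this fs/iomap.c-limited view; the sketch below is only an illustration of the decision, and the struct layout and helper names (my_dio, my_dio_complete_work, my_dio_complete_inline, my_dio_write_end_io) are invented for the example rather than taken from the patch.

/*
 * Illustrative sketch only, not the patch itself: at AIO DIO write
 * completion, defer the remaining work to a workqueue whenever the
 * inode has cached pages, because page cache invalidation may block
 * and therefore must not run in the bio end_io (interrupt) path.
 */
#include <linux/fs.h>
#include <linux/workqueue.h>

/* Hypothetical state, standing in for the real struct dio / struct iomap_dio. */
struct my_dio {
	struct inode		*inode;
	bool			is_write;
	struct work_struct	complete_work;
};

/* Process-context completion: in the real code this path ends up in
 * dio_complete()/iomap_dio_complete(), which now invalidate the written range. */
static void my_dio_complete_work(struct work_struct *work)
{
}

/* Completion that never touches the page cache and is safe to run inline. */
static void my_dio_complete_inline(struct my_dio *dio)
{
}

static void my_dio_write_end_io(struct my_dio *dio)
{
	if (dio->is_write && dio->inode->i_mapping->nrpages) {
		/* Cached pages may now be stale; finish from process context. */
		INIT_WORK(&dio->complete_work, my_dio_complete_work);
		queue_work(dio->inode->i_sb->s_dio_done_wq,
			   &dio->complete_work);
	} else {
		my_dio_complete_inline(dio);
	}
}

Either way, the invalidation itself is the hunk added to iomap_dio_complete() below, which derives the affected page index range from iocb->ki_pos and dio->size.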
Diffstat (limited to 'fs/iomap.c')
-rw-r--r--	fs/iomap.c	29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/fs/iomap.c b/fs/iomap.c
index 269b24a01f32..8194d30bdca0 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -713,8 +713,24 @@ struct iomap_dio {
 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 {
 	struct kiocb *iocb = dio->iocb;
+	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t ret;
 
+	/*
+	 * Try again to invalidate clean pages which might have been cached by
+	 * non-direct readahead, or faulted in by get_user_pages() if the source
+	 * of the write was an mmap'ed region of the file we're writing. Either
+	 * one is a pretty crazy thing to do, so we don't support it 100%. If
+	 * this invalidation fails, tough, the write still worked...
+	 */
+	if (!dio->error &&
+	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
+		ret = invalidate_inode_pages2_range(inode->i_mapping,
+				iocb->ki_pos >> PAGE_SHIFT,
+				(iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT);
+		WARN_ON_ONCE(ret);
+	}
+
 	if (dio->end_io) {
 		ret = dio->end_io(iocb,
 				dio->error ? dio->error : dio->size,
@@ -1042,19 +1058,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 
 	ret = iomap_dio_complete(dio);
 
-	/*
-	 * Try again to invalidate clean pages which might have been cached by
-	 * non-direct readahead, or faulted in by get_user_pages() if the source
-	 * of the write was an mmap'ed region of the file we're writing. Either
-	 * one is a pretty crazy thing to do, so we don't support it 100%. If
-	 * this invalidation fails, tough, the write still worked...
-	 */
-	if (iov_iter_rw(iter) == WRITE) {
-		int err = invalidate_inode_pages2_range(mapping,
-				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
-		WARN_ON_ONCE(err);
-	}
-
 	return ret;
 
 out_free_dio: