path: root/fs/xfs
author	Dave Jiang <dave.jiang@intel.com>	2018-08-10 11:48:18 -0400
committer	Darrick J. Wong <darrick.wong@oracle.com>	2018-08-12 11:37:31 -0400
commit	e25ff835af89a80aa6a4de58f413e494b2b96bd1 (patch)
tree	0fc0f6bd0cc21537f79509bba4761bb0f74696b3 /fs/xfs
parent	13942aa94a8b5df662d93c42c307b2f50cbe88b0 (diff)
xfs: Close race between direct IO and xfs_break_layouts()
This patch applies to xfs the same fix Ross made for ext4. If the refcount of a page is lowered between the time it is returned by dax_layout_busy_page() and when the refcount is checked again in xfs_break_layouts() => ___wait_var_event(), the waiting function xfs_wait_dax_page() will never be called. That means xfs_break_layouts() will still have 'retry' set to false, so we stop looping and never check the refcount of the other pages in this inode. Instead, always continue looping as long as dax_layout_busy_page() gives us a page which it found with an elevated refcount.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
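The fix works because the caller keeps breaking layouts for as long as its helper reports a busy DAX page. The loop below is a simplified, illustrative sketch of that caller-side retry pattern, not the verbatim xfs_break_layouts() from the tree; the wrapper name break_layouts_sketch() is hypothetical, while xfs_break_dax_layouts() is the helper changed by this patch.

/*
 * Illustrative sketch only: with this patch, xfs_break_dax_layouts()
 * sets *retry = true whenever dax_layout_busy_page() returns a page,
 * even if that page's refcount drops before we get to sleep on it,
 * so the caller loops and re-scans the inode's pages.
 */
static int
break_layouts_sketch(struct inode *inode)
{
	bool	retry;
	int	error;

	do {
		retry = false;
		error = xfs_break_dax_layouts(inode, &retry);
	} while (error == 0 && retry);

	return error;
}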
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_file.c	9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 6b31f41eafa2..181e9084519b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -721,12 +721,10 @@ xfs_file_write_iter(
 
 static void
 xfs_wait_dax_page(
-	struct inode		*inode,
-	bool			*did_unlock)
+	struct inode		*inode)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	*did_unlock = true;
 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	schedule();
 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -735,7 +733,7 @@ xfs_wait_dax_page(
 static int
 xfs_break_dax_layouts(
 	struct inode		*inode,
-	bool			*did_unlock)
+	bool			*retry)
 {
 	struct page		*page;
 
@@ -745,9 +743,10 @@ xfs_break_dax_layouts(
 	if (!page)
 		return 0;
 
+	*retry = true;
 	return ___wait_var_event(&page->_refcount,
 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-			0, 0, xfs_wait_dax_page(inode, did_unlock));
+			0, 0, xfs_wait_dax_page(inode));
 }
 
 int