diff options
author | Christoph Hellwig <hch@infradead.org> | 2011-08-27 10:42:53 -0400 |
---|---|---|
committer | Alex Elder <aelder@sgi.com> | 2011-10-11 22:15:01 -0400 |
commit | c58cb165bd44de8aaee9755a144136ae743be116 (patch) | |
tree | 47cf60c7555cf49eb76bbc682bd355e726f51df3 /fs/xfs | |
parent | 859f57ca00805e6c482eef1a7ab073097d02c8ca (diff) |
xfs: avoid direct I/O write vs buffered I/O race
Currently a buffered reader or writer can add pages to the pagecache
while we are waiting for the iolock in xfs_file_dio_aio_write. Prevent
this by re-checking mapping->nrpages after we got the iolock, and if
necessary upgrade the lock to exclusive mode. To simplify this a bit
only take the ilock inside of xfs_file_aio_write_checks.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/xfs_file.c | 17 |
1 files changed, 14 insertions, 3 deletions
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ee63c4fb363..06fe97e56e4 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -676,6 +676,7 @@ xfs_file_aio_write_checks( | |||
676 | xfs_fsize_t new_size; | 676 | xfs_fsize_t new_size; |
677 | int error = 0; | 677 | int error = 0; |
678 | 678 | ||
679 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL); | ||
679 | *new_sizep = 0; | 680 | *new_sizep = 0; |
680 | restart: | 681 | restart: |
681 | error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); | 682 | error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); |
@@ -798,14 +799,24 @@ xfs_file_dio_aio_write( | |||
798 | *iolock = XFS_IOLOCK_EXCL; | 799 | *iolock = XFS_IOLOCK_EXCL; |
799 | else | 800 | else |
800 | *iolock = XFS_IOLOCK_SHARED; | 801 | *iolock = XFS_IOLOCK_SHARED; |
801 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | 802 | xfs_rw_ilock(ip, *iolock); |
803 | |||
804 | /* | ||
805 | * Recheck if there are cached pages that need invalidate after we got | ||
806 | * the iolock to protect against other threads adding new pages while | ||
807 | * we were waiting for the iolock. | ||
808 | */ | ||
809 | if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) { | ||
810 | xfs_rw_iunlock(ip, *iolock); | ||
811 | *iolock = XFS_IOLOCK_EXCL; | ||
812 | xfs_rw_ilock(ip, *iolock); | ||
813 | } | ||
802 | 814 | ||
803 | ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock); | 815 | ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock); |
804 | if (ret) | 816 | if (ret) |
805 | return ret; | 817 | return ret; |
806 | 818 | ||
807 | if (mapping->nrpages) { | 819 | if (mapping->nrpages) { |
808 | WARN_ON(*iolock != XFS_IOLOCK_EXCL); | ||
809 | ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, | 820 | ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, |
810 | FI_REMAPF_LOCKED); | 821 | FI_REMAPF_LOCKED); |
811 | if (ret) | 822 | if (ret) |
@@ -851,7 +862,7 @@ xfs_file_buffered_aio_write( | |||
851 | size_t count = ocount; | 862 | size_t count = ocount; |
852 | 863 | ||
853 | *iolock = XFS_IOLOCK_EXCL; | 864 | *iolock = XFS_IOLOCK_EXCL; |
854 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | 865 | xfs_rw_ilock(ip, *iolock); |
855 | 866 | ||
856 | ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock); | 867 | ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock); |
857 | if (ret) | 868 | if (ret) |