author		Steven Whitehouse <swhiteho@redhat.com>	2013-12-18 09:14:52 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2013-12-20 05:41:21 -0500
commit		dfd11184d894cd0a92397b25cac18831a1a6a5bc (patch)
tree		b9b52451fc269f124312b91d6da6a5c894f4e6b9 /fs
parent		502be2a32f09f388e4ff34ef2e3ebcabbbb261da (diff)
GFS2: Fix incorrect invalidation for DIO/buffered I/O
In patch 209806aba9d540dde3db0a5ce72307f85f33468f we allowed local deferred locks to be granted against a cached exclusive lock. That opened up a corner case which this patch now fixes. The solution is to check whether we have cached pages each time we do direct I/O and, if so, to unmap, flush and invalidate those pages. Since the glock state machine normally does that for us, in most cases the code will be a no-op.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
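For readers unfamiliar with the pattern, the sequence the patch adds reduces to "unmap, write back, then drop" the cached pages covering the I/O range. The sketch below shows that sequence in isolation, using the same kernel APIs the diff calls; the helper name flush_and_invalidate_range and its exact bounds handling are illustrative only, not part of the patch:

#include <linux/fs.h>	/* filemap_write_and_wait_range() */
#include <linux/mm.h>	/* unmap_shared_mapping_range(), truncate_inode_pages_range(), PAGE_ALIGN() */

/* Illustrative only: flush and invalidate cached pages over [offset, offset + len). */
static int flush_and_invalidate_range(struct address_space *mapping,
				      loff_t offset, loff_t len)
{
	loff_t end = PAGE_ALIGN(offset + len) - 1;	/* inclusive last byte */
	int ret;

	if (mapping->nrpages == 0)
		return 0;	/* nothing cached, nothing to do */

	/* Tear down any shared user mappings of the range... */
	unmap_shared_mapping_range(mapping, offset, len);

	/* ...write back dirty pages and wait for the writes to complete... */
	ret = filemap_write_and_wait_range(mapping, offset, end);
	if (ret)
		return ret;

	/* ...then drop the now-clean pages from the page cache. */
	truncate_inode_pages_range(mapping, offset, end);
	return 0;
}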
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/aops.c	30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b7fc035a6943..73f3e4ee4037 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct address_space *mapping = inode->i_mapping;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int rv;
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	if (rv != 1)
 		goto out; /* dio not valid, fall back to buffered i/o */
 
+	/*
+	 * Now since we are holding a deferred (CW) lock at this point, you
+	 * might be wondering why this is ever needed. There is a case however
+	 * where we've granted a deferred local lock against a cached exclusive
+	 * glock. That is ok provided all granted local locks are deferred, but
+	 * it also means that it is possible to encounter pages which are
+	 * cached and possibly also mapped. So here we check for that and sort
+	 * them out ahead of the dio. The glock state machine will take care of
+	 * everything else.
+	 *
+	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+	 * the first place, mapping->nrpages will always be zero.
+	 */
+	if (mapping->nrpages) {
+		loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+		loff_t len = iov_length(iov, nr_segs);
+		loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+		rv = 0;
+		if (len == 0)
+			goto out;
+		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+		rv = filemap_write_and_wait_range(mapping, lstart, end);
+		if (rv)
+			return rv;
+		truncate_inode_pages_range(mapping, lstart, end);
+	}
+
 	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
 				  offset, nr_segs, gfs2_get_block_direct,
 				  NULL, NULL, 0);
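As a worked example of the bounds arithmetic in the new block (values chosen purely for illustration, assuming 4 KiB pages, i.e. PAGE_CACHE_SIZE == 4096): with offset == 6144 and len == 1000, end = PAGE_ALIGN(7144) - 1 = 8191, the inclusive last byte of the final page touched, which matches the inclusive lend convention of filemap_write_and_wait_range() and truncate_inode_pages_range(). Note that lstart = offset & (PAGE_CACHE_SIZE - 1) evaluates to 2048, the offset within its page; a page-aligned start for the same offset would be offset & ~(PAGE_CACHE_SIZE - 1) == 4096, so as written the write-back and invalidation start from the first page of the mapping rather than the page containing offset.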