about summary refs log tree commit diff stats
path: root/fs/ext2/file.c
diff options
context:
space:
mode:
authorRoss Zwisler <ross.zwisler@linux.intel.com>2015-10-13 18:25:37 -0400
committerJan Kara <jack@suse.com>2015-10-19 08:40:54 -0400
commit5726b27b09cc92452b543764899a07e7c8037edd (patch)
treea24cd32cee813259c61e6fb07ce4ba7d93a254f8 /fs/ext2/file.c
parentd4eb6dee471250661a5183a7336b18c85990e26d (diff)
ext2: Add locking for DAX faults
Add locking to ensure that DAX faults are isolated from ext2 operations that modify the data blocks allocation for an inode. This is intended to be analogous to the work being done in XFS by Dave Chinner: http://www.spinics.net/lists/linux-fsdevel/msg90260.html Compared with XFS the ext2 case is greatly simplified by the fact that ext2 already allocates and zeros new blocks before they are returned as part of ext2_get_block(), so DAX doesn't need to worry about getting unmapped or unwritten buffer heads. This means that the only work we need to do in ext2 is to isolate the DAX faults from inode block allocation changes. I believe this just means that we need to isolate the DAX faults from truncate operations. The newly introduced dax_sem is intended to replicate the protection offered by i_mmaplock in XFS. In addition to truncate the i_mmaplock also protects XFS operations like hole punching, fallocate down, extent manipulation IOCTLS like xfs_ioc_space() and extent swapping. Truncate is the only one of these operations supported by ext2. Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com> Signed-off-by: Jan Kara <jack@suse.com>
Diffstat (limited to 'fs/ext2/file.c')
-rw-r--r--fs/ext2/file.c84
1 file changed, 80 insertions, 4 deletions
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 1982c3f11aec..11a42c5a09ae 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -27,27 +27,103 @@
#include "acl.h"

#ifdef CONFIG_FS_DAX
/*
 * The lock ordering for ext2 DAX fault paths is:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault (vfs, freeze)
 *     ext2_inode_info->dax_sem
 *       address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
 *         ext2_inode_info->truncate_mutex
 *
 * The default page_lock and i_size verification done by non-DAX fault paths
 * is sufficient because ext2 doesn't support hole punching.
 */
30static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 42static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
31{ 43{
32 return dax_fault(vma, vmf, ext2_get_block, NULL); 44 struct inode *inode = file_inode(vma->vm_file);
45 struct ext2_inode_info *ei = EXT2_I(inode);
46 int ret;
47
48 if (vmf->flags & FAULT_FLAG_WRITE) {
49 sb_start_pagefault(inode->i_sb);
50 file_update_time(vma->vm_file);
51 }
52 down_read(&ei->dax_sem);
53
54 ret = __dax_fault(vma, vmf, ext2_get_block, NULL);
55
56 up_read(&ei->dax_sem);
57 if (vmf->flags & FAULT_FLAG_WRITE)
58 sb_end_pagefault(inode->i_sb);
59 return ret;
33} 60}
34 61
35static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, 62static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
36 pmd_t *pmd, unsigned int flags) 63 pmd_t *pmd, unsigned int flags)
37{ 64{
38 return dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL); 65 struct inode *inode = file_inode(vma->vm_file);
66 struct ext2_inode_info *ei = EXT2_I(inode);
67 int ret;
68
69 if (flags & FAULT_FLAG_WRITE) {
70 sb_start_pagefault(inode->i_sb);
71 file_update_time(vma->vm_file);
72 }
73 down_read(&ei->dax_sem);
74
75 ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL);
76
77 up_read(&ei->dax_sem);
78 if (flags & FAULT_FLAG_WRITE)
79 sb_end_pagefault(inode->i_sb);
80 return ret;
39} 81}
40 82
41static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 83static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
42{ 84{
43 return dax_mkwrite(vma, vmf, ext2_get_block, NULL); 85 struct inode *inode = file_inode(vma->vm_file);
86 struct ext2_inode_info *ei = EXT2_I(inode);
87 int ret;
88
89 sb_start_pagefault(inode->i_sb);
90 file_update_time(vma->vm_file);
91 down_read(&ei->dax_sem);
92
93 ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL);
94
95 up_read(&ei->dax_sem);
96 sb_end_pagefault(inode->i_sb);
97 return ret;
98}
99
100static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
101 struct vm_fault *vmf)
102{
103 struct inode *inode = file_inode(vma->vm_file);
104 struct ext2_inode_info *ei = EXT2_I(inode);
105 int ret = VM_FAULT_NOPAGE;
106 loff_t size;
107
108 sb_start_pagefault(inode->i_sb);
109 file_update_time(vma->vm_file);
110 down_read(&ei->dax_sem);
111
112 /* check that the faulting page hasn't raced with truncate */
113 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
114 if (vmf->pgoff >= size)
115 ret = VM_FAULT_SIGBUS;
116
117 up_read(&ei->dax_sem);
118 sb_end_pagefault(inode->i_sb);
119 return ret;
44} 120}
45 121
46static const struct vm_operations_struct ext2_dax_vm_ops = { 122static const struct vm_operations_struct ext2_dax_vm_ops = {
47 .fault = ext2_dax_fault, 123 .fault = ext2_dax_fault,
48 .pmd_fault = ext2_dax_pmd_fault, 124 .pmd_fault = ext2_dax_pmd_fault,
49 .page_mkwrite = ext2_dax_mkwrite, 125 .page_mkwrite = ext2_dax_mkwrite,
50 .pfn_mkwrite = dax_pfn_mkwrite, 126 .pfn_mkwrite = ext2_dax_pfn_mkwrite,
51}; 127};
52 128
53static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) 129static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)