author     Dan Williams <dan.j.williams@intel.com>    2017-11-13 19:38:44 -0500
committer  Dan Williams <dan.j.williams@intel.com>    2017-11-13 19:38:44 -0500
commit     aaa422c4c3f6ee958ea9d6c9260ac40f90a3f4e9 (patch)
tree       64ce62bb891f903a5c504a2700823cbd60bb877c /fs/dax.c
parent     a39e596baa07cb1dc19c2ead14c9fd2a30f22352 (diff)
fs, dax: unify IOMAP_F_DIRTY read vs write handling policy in the dax core
While reviewing whether MAP_SYNC should strengthen its current guarantee
of syncing writes from the initiating process to also include
third-party readers observing dirty metadata, Dave pointed out that the
check of IOMAP_WRITE is misplaced.
The policy of what to do with IOMAP_F_DIRTY should be separated from the
generic filesystem mechanism of reporting dirty metadata. Move this
policy to the fs-dax core to simplify the per-filesystem iomap handlers,
and further centralize code that implements the MAP_SYNC policy. This
otherwise should not change behavior; it just makes it easier to change
behavior in the future.
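
[Editorial illustration, not part of this patch: the sketch below shows what a
per-filesystem ->iomap_begin handler might look like once the read-vs-write
policy lives in the dax core. It reports dirty metadata via IOMAP_F_DIRTY
unconditionally and leaves the synchronous-fault decision to
dax_fault_is_synchronous(). example_iomap_begin() and
example_inode_has_dirty_metadata() are hypothetical names, and the handler
body is elided; only the flag-setting pattern is the point.]

    /*
     * Illustrative sketch: a hypothetical filesystem's ->iomap_begin.
     * It no longer needs to check IOMAP_WRITE before reporting dirty
     * metadata; that check now lives in dax_fault_is_synchronous() in
     * fs/dax.c.
     */
    static int example_iomap_begin(struct inode *inode, loff_t pos,
                    loff_t length, unsigned flags, struct iomap *iomap)
    {
            /* ... map the range: set iomap->type, offset, length, ... */

            /* hypothetical helper: does the inode have unflushed metadata? */
            if (example_inode_has_dirty_metadata(inode))
                    iomap->flags |= IOMAP_F_DIRTY;

            return 0;
    }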
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reported-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c | 15
1 file changed, 13 insertions, 2 deletions
@@ -1079,6 +1079,17 @@ static int dax_fault_return(int error)
 	return VM_FAULT_SIGBUS;
 }
 
+/*
+ * MAP_SYNC on a dax mapping guarantees dirty metadata is
+ * flushed on write-faults (non-cow), but not read-faults.
+ */
+static bool dax_fault_is_synchronous(unsigned long flags,
+		struct vm_area_struct *vma, struct iomap *iomap)
+{
+	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
+		&& (iomap->flags & IOMAP_F_DIRTY);
+}
+
 static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       const struct iomap_ops *ops)
 {
@@ -1170,7 +1181,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		goto finish_iomap;
 	}
 
-	sync = (vma->vm_flags & VM_SYNC) && (iomap.flags & IOMAP_F_DIRTY);
+	sync = dax_fault_is_synchronous(flags, vma, &iomap);
 
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
@@ -1390,7 +1401,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
 		goto finish_iomap;
 
-	sync = (vma->vm_flags & VM_SYNC) && (iomap.flags & IOMAP_F_DIRTY);
+	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
 
 	switch (iomap.type) {
 	case IOMAP_MAPPED: