author:    Jan Kara <jack@suse.cz>  2017-11-01 11:36:33 -0400
committer: Dan Williams <dan.j.williams@intel.com>  2017-11-03 09:26:23 -0400
commit:    5e161e4066d3ebeaff95a4b979b42f8bf00494d5
tree:      a2a0e0118ca82153089ea9ce6f6c0827ea51bdeb /fs/dax.c
parent:    31a6f1a6e5a4a26040b67d8fa4256539b36f5893
dax: Factor out getting of pfn out of iomap
Factor out code to get pfn out of iomap that is shared between PTE and
PMD fault path.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c  83
1 file changed, 43 insertions(+), 40 deletions(-)
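A note on the alignment test in the new helper: the old PMD path checked the pfn against the PMD-specific PG_PMD_COLOUR mask, while dax_iomap_pfn() uses the size-generic mask PHYS_PFN(size) - 1, which must hold for any naturally aligned mapping. The minimal userspace sketch below (not part of the patch; it assumes x86-64 constants, 4 KiB pages and 2 MiB PMDs) shows that the generic mask reduces to PG_PMD_COLOUR for PMD_SIZE and degenerates to a no-op for PAGE_SIZE:

/* Not part of the patch: demonstrates the generic alignment mask. */
#include <assert.h>

#define PAGE_SHIFT      12                      /* assumes x86-64: 4 KiB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PMD_SIZE        (1UL << 21)             /* assumes 2 MiB PMDs */
#define PHYS_PFN(x)     ((unsigned long)((x) >> PAGE_SHIFT))
/* The PMD-only mask the old code used (defined in fs/dax.c) */
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)

int main(void)
{
        /* Generic mask equals the old PMD mask (511 with these constants)... */
        assert(PHYS_PFN(PMD_SIZE) - 1 == PG_PMD_COLOUR);
        /* ...and is 0 for the PTE path, where any pfn is suitably aligned. */
        assert(PHYS_PFN(PAGE_SIZE) - 1 == 0);
        /* pfn 0x200 is 2 MiB-aligned and passes; pfn 0x201 would not. */
        assert((0x200UL & (PHYS_PFN(PMD_SIZE) - 1)) == 0);
        assert((0x201UL & (PHYS_PFN(PMD_SIZE) - 1)) != 0);
        return 0;
}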
diff --git a/fs/dax.c b/fs/dax.c
index 0bc42ac294ca..116eef8d6c69 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -825,30 +825,53 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
 }
 
-static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
-		loff_t pos, void *entry)
+static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
+		pfn_t *pfnp)
 {
 	const sector_t sector = dax_iomap_sector(iomap, pos);
-	struct vm_area_struct *vma = vmf->vma;
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	unsigned long vaddr = vmf->address;
-	void *ret, *kaddr;
 	pgoff_t pgoff;
+	void *kaddr;
 	int id, rc;
-	pfn_t pfn;
+	long length;
 
-	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
+	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
 	if (rc)
 		return rc;
-
 	id = dax_read_lock();
-	rc = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(PAGE_SIZE),
-			&kaddr, &pfn);
-	if (rc < 0) {
-		dax_read_unlock(id);
-		return rc;
+	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
+			&kaddr, pfnp);
+	if (length < 0) {
+		rc = length;
+		goto out;
 	}
+	rc = -EINVAL;
+	if (PFN_PHYS(length) < size)
+		goto out;
+	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
+		goto out;
+	/* For larger pages we need devmap */
+	if (length > 1 && !pfn_t_devmap(*pfnp))
+		goto out;
+	rc = 0;
+out:
 	dax_read_unlock(id);
+	return rc;
+}
+
+static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
+		loff_t pos, void *entry)
+{
+	const sector_t sector = dax_iomap_sector(iomap, pos);
+	struct vm_area_struct *vma = vmf->vma;
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	unsigned long vaddr = vmf->address;
+	void *ret;
+	int rc;
+	pfn_t pfn;
+
+	rc = dax_iomap_pfn(iomap, pos, PAGE_SIZE, &pfn);
+	if (rc < 0)
+		return rc;
 
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
 	if (IS_ERR(ret))
@@ -1223,46 +1246,26 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	const sector_t sector = dax_iomap_sector(iomap, pos);
-	struct dax_device *dax_dev = iomap->dax_dev;
-	struct block_device *bdev = iomap->bdev;
 	struct inode *inode = mapping->host;
-	const size_t size = PMD_SIZE;
-	void *ret = NULL, *kaddr;
-	long length = 0;
-	pgoff_t pgoff;
+	void *ret = NULL;
 	pfn_t pfn = {};
-	int id;
+	int rc;
 
-	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
+	rc = dax_iomap_pfn(iomap, pos, PMD_SIZE, &pfn);
+	if (rc < 0)
 		goto fallback;
 
-	id = dax_read_lock();
-	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
-	if (length < 0)
-		goto unlock_fallback;
-	length = PFN_PHYS(length);
-
-	if (length < size)
-		goto unlock_fallback;
-	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
-		goto unlock_fallback;
-	if (!pfn_t_devmap(pfn))
-		goto unlock_fallback;
-	dax_read_unlock(id);
-
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
 			RADIX_DAX_PMD);
 	if (IS_ERR(ret))
 		goto fallback;
 
-	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
+	trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, ret);
 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 			pfn, vmf->flags & FAULT_FLAG_WRITE);
 
-unlock_fallback:
-	dax_read_unlock(id);
 fallback:
-	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
+	trace_dax_pmd_insert_mapping_fallback(inode, vmf, PMD_SIZE, pfn, ret);
 	return VM_FAULT_FALLBACK;
 }
 
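For reference, the checks dax_iomap_pfn() performs after dax_direct_access() decide whether each fault path proceeds or falls back, and can be modeled in userspace. In the sketch below, validate_pfn() is a hypothetical stand-in for that sequence, not a kernel function: length is in pages, as dax_direct_access() returns it, and the devmap flag stands in for pfn_t_devmap().

/* Hypothetical userspace model of dax_iomap_pfn()'s checks; not kernel code. */
#include <assert.h>
#include <errno.h>
#include <stddef.h>

#define PAGE_SHIFT      12                      /* assumes x86-64: 4 KiB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PMD_SIZE        (1UL << 21)             /* assumes 2 MiB PMDs */
#define PHYS_PFN(x)     ((unsigned long)((x) >> PAGE_SHIFT))
#define PFN_PHYS(x)     ((unsigned long)(x) << PAGE_SHIFT)

static int validate_pfn(long length, unsigned long pfn, size_t size, int devmap)
{
        if (length < 0)
                return (int)length;     /* dax_direct_access() error */
        if (PFN_PHYS(length) < size)
                return -EINVAL;         /* not enough contiguous space */
        if (pfn & (PHYS_PFN(size) - 1))
                return -EINVAL;         /* pfn misaligned for this size */
        if (length > 1 && !devmap)
                return -EINVAL;         /* larger pages need devmap */
        return 0;
}

int main(void)
{
        /* PMD fault: 512 aligned devmap pages available -> success */
        assert(validate_pfn(512, 0x200, PMD_SIZE, 1) == 0);
        /* PMD fault: only 256 pages available -> caller falls back to PTEs */
        assert(validate_pfn(256, 0x200, PMD_SIZE, 1) == -EINVAL);
        /* PTE fault: a single page needs no devmap */
        assert(validate_pfn(1, 0x3, PAGE_SIZE, 0) == 0);
        return 0;
}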