Diffstat (limited to 'fs/dax.c')

 fs/dax.c | 39 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1091,6 +1091,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	unsigned flags = IOMAP_FAULT;
 	int error, major = 0;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	bool sync;
 	int vmf_ret = 0;
 	void *entry;
 	pfn_t pfn;
@@ -1169,6 +1170,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		goto finish_iomap;
 	}
 
+	sync = (vma->vm_flags & VM_SYNC) && (iomap.flags & IOMAP_F_DIRTY);
+
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
 		if (iomap.flags & IOMAP_F_NEW) {
@@ -1182,12 +1185,27 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry,
 						 dax_iomap_sector(&iomap, pos),
-						 0, write);
+						 0, write && !sync);
 		if (IS_ERR(entry)) {
 			error = PTR_ERR(entry);
 			goto error_finish_iomap;
 		}
 
+		/*
+		 * If we are doing synchronous page fault and inode needs fsync,
+		 * we can insert PTE into page tables only after that happens.
+		 * Skip insertion for now and return the pfn so that caller can
+		 * insert it after fsync is done.
+		 */
+		if (sync) {
+			if (WARN_ON_ONCE(!pfnp)) {
+				error = -EIO;
+				goto error_finish_iomap;
+			}
+			*pfnp = pfn;
+			vmf_ret = VM_FAULT_NEEDDSYNC | major;
+			goto finish_iomap;
+		}
 		trace_dax_insert_mapping(inode, vmf, entry);
 		if (write)
 			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
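For context, here is roughly how a filesystem fault handler is expected to consume VM_FAULT_NEEDDSYNC together with the pfn reported through *pfnp. This is a simplified sketch modeled on the ext4 wiring from the same series; ext4_dax_huge_fault(), dax_finish_sync_fault(), ext4_iomap_ops and the i_mmap_sem locking come from companion patches, not from this diff.

/*
 * Sketch of a filesystem ->huge_fault handler consuming VM_FAULT_NEEDDSYNC.
 * Simplified from the ext4 pattern in the same series; helper names outside
 * this diff are assumptions.
 */
static int ext4_dax_huge_fault(struct vm_fault *vmf,
			       enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	int result;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	down_read(&EXT4_I(inode)->i_mmap_sem);
	/* dax_iomap_fault() now takes a pfn_t * so it can report the pfn */
	result = dax_iomap_fault(vmf, pe_size, &pfn, &ext4_iomap_ops);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	if (write) {
		/*
		 * The fault returned the pfn without installing the PTE/PMD.
		 * Flush dirty metadata via the fsync machinery, then insert
		 * the mapping (dax_finish_sync_fault() is a companion patch).
		 */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		sb_end_pagefault(inode->i_sb);
	}
	return result;
}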
@@ -1287,6 +1305,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	bool sync;
 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
 	struct inode *inode = mapping->host;
 	int result = VM_FAULT_FALLBACK;
@@ -1371,6 +1390,8 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
 		goto finish_iomap;
 
+	sync = (vma->vm_flags & VM_SYNC) && (iomap.flags & IOMAP_F_DIRTY);
+
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
 		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
@@ -1379,10 +1400,24 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry,
 						 dax_iomap_sector(&iomap, pos),
-						 RADIX_DAX_PMD, write);
+						 RADIX_DAX_PMD, write && !sync);
 		if (IS_ERR(entry))
 			goto finish_iomap;
 
+		/*
+		 * If we are doing synchronous page fault and inode needs fsync,
+		 * we can insert PMD into page tables only after that happens.
+		 * Skip insertion for now and return the pfn so that caller can
+		 * insert it after fsync is done.
+		 */
+		if (sync) {
+			if (WARN_ON_ONCE(!pfnp))
+				goto finish_iomap;
+			*pfnp = pfn;
+			result = VM_FAULT_NEEDDSYNC;
+			goto finish_iomap;
+		}
+
 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
 		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
 					    write);
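On the userspace side, VM_SYNC is set when a file is mapped with MAP_SYNC. A minimal sketch follows, assuming the MAP_SYNC and MAP_SHARED_VALIDATE flags from the companion uapi patches in this series and a hypothetical DAX file at /mnt/pmem/data; with such a mapping, a store followed by a CPU cache flush is durable without calling fsync(), because any fault that would leave unflushed metadata behind is completed only after the fsync machinery has run.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03	/* from companion uapi patch */
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000		/* from companion uapi patch */
#endif

int main(void)
{
	int fd = open("/mnt/pmem/data", O_RDWR);	/* hypothetical DAX file */
	if (fd < 0)
		return 1;

	/* MAP_SHARED_VALIDATE makes the kernel reject unknown mmap flags. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "durable without fsync");
	/* flush CPU caches here (e.g. clwb + sfence) instead of fsync() */
	munmap(p, 4096);
	close(fd);
	return 0;
}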