author     David S. Miller <davem@davemloft.net>    2019-04-17 14:26:25 -0400
committer  David S. Miller <davem@davemloft.net>    2019-04-17 14:26:25 -0400
commit     6b0a7f84ea1fe248df96ccc4dd86e817e32ef65b (patch)
tree       0a7976054052e793da782c2b7ec34eccfbf66449 /fs/dax.c
parent     cea0aa9cbd5ad4efe267e9487ed5d48d16756253 (diff)
parent     fe5cdef29e41c8bda8cd1a11545e7c6bfe25570e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflict resolution of af_smc.c from Stephen Rothwell.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c  15
1 file changed, 15 insertions, 0 deletions
@@ -33,6 +33,7 @@
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
+#include <asm/pgalloc.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -1407,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = mapping->host;
+	pgtable_t pgtable = NULL;
 	struct page *zero_page;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
@@ -1422,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
 			DAX_PMD | DAX_ZERO_PAGE, false);
 
+	if (arch_needs_pgtable_deposit()) {
+		pgtable = pte_alloc_one(vma->vm_mm);
+		if (!pgtable)
+			return VM_FAULT_OOM;
+	}
+
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
 	if (!pmd_none(*(vmf->pmd))) {
 		spin_unlock(ptl);
 		goto fallback;
 	}
 
+	if (pgtable) {
+		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+		mm_inc_nr_ptes(vma->vm_mm);
+	}
 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
 	pmd_entry = pmd_mkhuge(pmd_entry);
 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1436,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	return VM_FAULT_NOPAGE;
 
 fallback:
+	if (pgtable)
+		pte_free(vma->vm_mm, pgtable);
 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
 	return VM_FAULT_FALLBACK;
 }
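
The fs/dax.c hunks above make dax_pmd_load_hole() preallocate a PTE page before taking the PMD lock and deposit it when the huge zero page is installed, so architectures that require a deposited page table under every huge PMD (arch_needs_pgtable_deposit(), e.g. ppc64 book3s with the hash MMU) can withdraw it later on split or zap; the fallback path frees the preallocation. Below is a condensed sketch of that same pattern, stripped of the DAX entry bookkeeping. install_zero_pmd() is an illustrative name, not a function in fs/dax.c, and the sketch assumes the v5.1-era MM helpers already used in the diff.

/*
 * Sketch only: shows the preallocate / deposit / free-on-fallback
 * pattern followed by the patch, not the exact fs/dax.c code.
 */
static vm_fault_t install_zero_pmd(struct vm_fault *vmf, struct page *zero_page)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	pgtable_t pgtable = NULL;
	spinlock_t *ptl;
	pmd_t entry;

	/* Allocate before taking the PMD lock; pte_alloc_one() may sleep. */
	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(mm, vmf->pmd);
	if (!pmd_none(*vmf->pmd)) {
		/* Raced with another fault: drop the lock and the preallocation. */
		spin_unlock(ptl);
		if (pgtable)
			pte_free(mm, pgtable);
		return VM_FAULT_FALLBACK;
	}

	if (pgtable) {
		/* Stash the PTE page under this PMD for a later withdraw. */
		pgtable_trans_huge_deposit(mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(mm);
	}

	entry = pmd_mkhuge(mk_pmd(zero_page, vmf->vma->vm_page_prot));
	set_pmd_at(mm, pmd_addr, vmf->pmd, entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}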