author		Oliver O'Halloran <oohall@gmail.com>	2017-05-08 18:59:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 20:15:15 -0400
commit		3b6521f53572d7fc1b40c93931948716a53a82ab (patch)
tree		6344476356e46204fe91c39a0c790031e212d544
parent		c14a6eb44d8a59337433961d181ca953fb20d083 (diff)
mm/huge_memory.c: deposit a pgtable for DAX PMD faults when required
Although all architectures use a deposited page table for THP on anonymous VMAs, some architectures (s390 and powerpc) require the deposited storage even for file-backed VMAs due to quirks of their MMUs.

This patch adds support for depositing a table in the DAX PMD fault handling path for archs that require it.  Other architectures should see no functional changes.

Link: http://lkml.kernel.org/r/20170411174233.21902-3-oohall@gmail.com
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: linux-nvdimm@ml01.01.org
Cc: Oliver O'Halloran <oohall@gmail.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
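[Editor's sketch] Before reading the diff, a condensed sketch of the deposit step the fault path gains may help. The helper name below is invented for illustration; the calls it makes (arch_needs_pgtable_deposit(), pte_alloc_one(), pgtable_trans_huge_deposit()) are the ones the patch itself uses. Note that the real patch splits the work: the allocation happens before the PMD lock is taken, and the deposit happens under it inside insert_pfn_pmd().

/*
 * Illustrative sketch only -- dax_pmd_deposit_sketch() does not exist
 * in the kernel; see the diff below for the authoritative code.
 */
static int dax_pmd_deposit_sketch(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;

	/* Most architectures return false and skip the deposit entirely. */
	if (!arch_needs_pgtable_deposit())
		return 0;

	/* Allocate the PTE page up front, while sleeping is still safe. */
	pgtable = pte_alloc_one(mm, addr);
	if (!pgtable)
		return VM_FAULT_OOM;

	/*
	 * Stash the page against the PMD (the real code holds the PMD
	 * lock here); zap_huge_pmd() withdraws and frees it on teardown.
	 */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	atomic_long_inc(&mm->nr_ptes);
	return 0;
}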
-rw-r--r--	mm/huge_memory.c	20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index aa01dd47cc65..a84909cf20d3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -715,7 +715,8 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
+		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
+		pgtable_t pgtable)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pmd_t entry;
@@ -729,6 +730,12 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 		entry = pmd_mkyoung(pmd_mkdirty(entry));
 		entry = maybe_pmd_mkwrite(entry, vma);
 	}
+
+	if (pgtable) {
+		pgtable_trans_huge_deposit(mm, pmd, pgtable);
+		atomic_long_inc(&mm->nr_ptes);
+	}
+
 	set_pmd_at(mm, addr, pmd, entry);
 	update_mmu_cache_pmd(vma, addr, pmd);
 	spin_unlock(ptl);
@@ -738,6 +745,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 			pmd_t *pmd, pfn_t pfn, bool write)
 {
 	pgprot_t pgprot = vma->vm_page_prot;
+	pgtable_t pgtable = NULL;
 	/*
 	 * If we had pmd_special, we could avoid all these restrictions,
 	 * but we need to be consistent with PTEs and architectures that
@@ -752,9 +760,15 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return VM_FAULT_SIGBUS;
 
+	if (arch_needs_pgtable_deposit()) {
+		pgtable = pte_alloc_one(vma->vm_mm, addr);
+		if (!pgtable)
+			return VM_FAULT_OOM;
+	}
+
 	track_pfn_insert(vma, &pgprot, pfn);
 
-	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -1611,6 +1625,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			tlb->fullmm);
 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 	if (vma_is_dax(vma)) {
+		if (arch_needs_pgtable_deposit())
+			zap_deposited_table(tlb->mm, pmd);
 		spin_unlock(ptl);
 		if (is_huge_zero_pmd(orig_pmd))
 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
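[Editor's note] For context: arch_needs_pgtable_deposit() is a constant-false stub on most architectures, so both new branches compile away there. From the asm-generic and powerpc headers of that era (quoted approximately, as an assumption rather than part of this diff), the relevant definitions look like:

/* include/asm-generic/pgtable.h: default, no deposit required */
#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

/* powerpc book3s64 override, roughly: only the hash MMU needs it */
static inline bool arch_needs_pgtable_deposit(void)
{
	if (radix_enabled())
		return false;
	return true;
}

Because the stub is a compile-time constant, the compiler drops the pte_alloc_one() and zap_deposited_table() paths on architectures that do not opt in, which is why the commit message can promise no functional change elsewhere.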