aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2019-05-13 20:15:33 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-05-14 12:47:44 -0400
commitfce86ff5802bac3a7b19db171aa1949ef9caac31 (patch)
tree6617cb1dbddc2362b07858191bf97a15f1b122b1
parenta13f0655503a4a89df67fdc7cac6a7810795d4b3 (diff)
mm/huge_memory: fix vmf_insert_pfn_{pmd, pud}() crash, handle unaligned addresses
Starting with c6f3c5ee40c1 ("mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()") vmf_insert_pfn_pmd() internally calls pmdp_set_access_flags(). That helper enforces a pmd aligned @address argument via VM_BUG_ON() assertion.

Update the implementation to take a 'struct vm_fault' argument directly and apply the address alignment fixup internally to fix crash signatures like:

    kernel BUG at arch/x86/mm/pgtable.c:515!
    invalid opcode: 0000 [#1] SMP NOPTI
    CPU: 51 PID: 43713 Comm: java Tainted: G           OE     4.19.35 #1
    [..]
    RIP: 0010:pmdp_set_access_flags+0x48/0x50
    [..]
    Call Trace:
     vmf_insert_pfn_pmd+0x198/0x350
     dax_iomap_fault+0xe82/0x1190
     ext4_dax_huge_fault+0x103/0x1f0
     ? __switch_to_asm+0x40/0x70
     __handle_mm_fault+0x3f6/0x1370
     ? __switch_to_asm+0x34/0x70
     ? __switch_to_asm+0x40/0x70
     handle_mm_fault+0xda/0x200
     __do_page_fault+0x249/0x4f0
     do_page_fault+0x32/0x110
     ? page_fault+0x8/0x30
     page_fault+0x1e/0x30

Link: http://lkml.kernel.org/r/155741946350.372037.11148198430068238140.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: c6f3c5ee40c1 ("mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Piotr Balcer <piotr.balcer@intel.com>
Tested-by: Yan Ma <yan.ma@intel.com>
Tested-by: Pankaj Gupta <pagupta@redhat.com>
Reviewed-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Chandan Rajendra <chandan@linux.ibm.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/dax/device.c6
-rw-r--r--fs/dax.c6
-rw-r--r--include/linux/huge_mm.h6
-rw-r--r--mm/huge_memory.c16
4 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index e428468ab661..996d68ff992a 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -184,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
184 184
185 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); 185 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
186 186
187 return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn, 187 return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
188 vmf->flags & FAULT_FLAG_WRITE);
189} 188}
190 189
191#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 190#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -235,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
235 234
236 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); 235 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
237 236
238 return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn, 237 return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
239 vmf->flags & FAULT_FLAG_WRITE);
240} 238}
241#else 239#else
242static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, 240static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/fs/dax.c b/fs/dax.c
index e5e54da1715f..83009875308c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1575,8 +1575,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1575 } 1575 }
1576 1576
1577 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); 1577 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1578 result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn, 1578 result = vmf_insert_pfn_pmd(vmf, pfn, write);
1579 write);
1580 break; 1579 break;
1581 case IOMAP_UNWRITTEN: 1580 case IOMAP_UNWRITTEN:
1582 case IOMAP_HOLE: 1581 case IOMAP_HOLE:
@@ -1686,8 +1685,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1686 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 1685 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1687#ifdef CONFIG_FS_DAX_PMD 1686#ifdef CONFIG_FS_DAX_PMD
1688 else if (order == PMD_ORDER) 1687 else if (order == PMD_ORDER)
1689 ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, 1688 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1690 pfn, true);
1691#endif 1689#endif
1692 else 1690 else
1693 ret = VM_FAULT_FALLBACK; 1691 ret = VM_FAULT_FALLBACK;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 381e872bfde0..7cd5c150c21d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
47extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 47extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
48 unsigned long addr, pgprot_t newprot, 48 unsigned long addr, pgprot_t newprot,
49 int prot_numa); 49 int prot_numa);
50vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 50vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
51 pmd_t *pmd, pfn_t pfn, bool write); 51vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
52vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
53 pud_t *pud, pfn_t pfn, bool write);
54enum transparent_hugepage_flag { 52enum transparent_hugepage_flag {
55 TRANSPARENT_HUGEPAGE_FLAG, 53 TRANSPARENT_HUGEPAGE_FLAG,
56 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 54 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b6a34b32d8ac..c314a362c167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -793,11 +793,13 @@ out_unlock:
793 pte_free(mm, pgtable); 793 pte_free(mm, pgtable);
794} 794}
795 795
796vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 796vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
797 pmd_t *pmd, pfn_t pfn, bool write)
798{ 797{
798 unsigned long addr = vmf->address & PMD_MASK;
799 struct vm_area_struct *vma = vmf->vma;
799 pgprot_t pgprot = vma->vm_page_prot; 800 pgprot_t pgprot = vma->vm_page_prot;
800 pgtable_t pgtable = NULL; 801 pgtable_t pgtable = NULL;
802
801 /* 803 /*
802 * If we had pmd_special, we could avoid all these restrictions, 804 * If we had pmd_special, we could avoid all these restrictions,
803 * but we need to be consistent with PTEs and architectures that 805 * but we need to be consistent with PTEs and architectures that
@@ -820,7 +822,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
820 822
821 track_pfn_insert(vma, &pgprot, pfn); 823 track_pfn_insert(vma, &pgprot, pfn);
822 824
823 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); 825 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
824 return VM_FAULT_NOPAGE; 826 return VM_FAULT_NOPAGE;
825} 827}
826EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 828EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -869,10 +871,12 @@ out_unlock:
869 spin_unlock(ptl); 871 spin_unlock(ptl);
870} 872}
871 873
872vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 874vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
873 pud_t *pud, pfn_t pfn, bool write)
874{ 875{
876 unsigned long addr = vmf->address & PUD_MASK;
877 struct vm_area_struct *vma = vmf->vma;
875 pgprot_t pgprot = vma->vm_page_prot; 878 pgprot_t pgprot = vma->vm_page_prot;
879
876 /* 880 /*
877 * If we had pud_special, we could avoid all these restrictions, 881 * If we had pud_special, we could avoid all these restrictions,
878 * but we need to be consistent with PTEs and architectures that 882 * but we need to be consistent with PTEs and architectures that
@@ -889,7 +893,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
889 893
890 track_pfn_insert(vma, &pgprot, pfn); 894 track_pfn_insert(vma, &pgprot, pfn);
891 895
892 insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); 896 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
893 return VM_FAULT_NOPAGE; 897 return VM_FAULT_NOPAGE;
894} 898}
895EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); 899EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);