summary refs log tree commit diff stats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2017-02-24 17:57:08 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-24 20:46:54 -0500
commitc791ace1e747371658237f0d30234fef56c39669 (patch)
treede6b1b2b40d5aee5505987856bc29c259c0ff202 /mm/memory.c
parent9557feee39b75ceb502b4777e08706df1ddf10ed (diff)
mm: replace FAULT_FLAG_SIZE with parameter to huge_fault
Since the introduction of FAULT_FLAG_SIZE to the vm_fault flag, it has been somewhat painful with getting the flags set and removed at the correct locations. More than one kernel oops was introduced due to difficulties of getting the placement correctly. Remove the flag values and introduce an input parameter to huge_fault that indicates the size of the page entry. This makes the code easier to trace and should avoid the issues we see with the fault flags where removal of the flag was necessary in the fallback paths. Link: http://lkml.kernel.org/r/148615748258.43180.1690152053774975329.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dave Jiang <dave.jiang@intel.com> Tested-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Jan Kara <jack@suse.cz> Cc: Matthew Wilcox <mawilcox@microsoft.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Ross Zwisler <ross.zwisler@linux.intel.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Nilesh Choudhury <nilesh.choudhury@oracle.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c17
1 file changed, 4 insertions, 13 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 41e2a2d4b2a6..6040b74d02a2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3489,7 +3489,7 @@ static int create_huge_pmd(struct vm_fault *vmf)
3489 if (vma_is_anonymous(vmf->vma)) 3489 if (vma_is_anonymous(vmf->vma))
3490 return do_huge_pmd_anonymous_page(vmf); 3490 return do_huge_pmd_anonymous_page(vmf);
3491 if (vmf->vma->vm_ops->huge_fault) 3491 if (vmf->vma->vm_ops->huge_fault)
3492 return vmf->vma->vm_ops->huge_fault(vmf); 3492 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
3493 return VM_FAULT_FALLBACK; 3493 return VM_FAULT_FALLBACK;
3494} 3494}
3495 3495
@@ -3498,7 +3498,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
3498 if (vma_is_anonymous(vmf->vma)) 3498 if (vma_is_anonymous(vmf->vma))
3499 return do_huge_pmd_wp_page(vmf, orig_pmd); 3499 return do_huge_pmd_wp_page(vmf, orig_pmd);
3500 if (vmf->vma->vm_ops->huge_fault) 3500 if (vmf->vma->vm_ops->huge_fault)
3501 return vmf->vma->vm_ops->huge_fault(vmf); 3501 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
3502 3502
3503 /* COW handled on pte level: split pmd */ 3503 /* COW handled on pte level: split pmd */
3504 VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); 3504 VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
@@ -3519,7 +3519,7 @@ static int create_huge_pud(struct vm_fault *vmf)
3519 if (vma_is_anonymous(vmf->vma)) 3519 if (vma_is_anonymous(vmf->vma))
3520 return VM_FAULT_FALLBACK; 3520 return VM_FAULT_FALLBACK;
3521 if (vmf->vma->vm_ops->huge_fault) 3521 if (vmf->vma->vm_ops->huge_fault)
3522 return vmf->vma->vm_ops->huge_fault(vmf); 3522 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
3523#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3523#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3524 return VM_FAULT_FALLBACK; 3524 return VM_FAULT_FALLBACK;
3525} 3525}
@@ -3531,7 +3531,7 @@ static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
3531 if (vma_is_anonymous(vmf->vma)) 3531 if (vma_is_anonymous(vmf->vma))
3532 return VM_FAULT_FALLBACK; 3532 return VM_FAULT_FALLBACK;
3533 if (vmf->vma->vm_ops->huge_fault) 3533 if (vmf->vma->vm_ops->huge_fault)
3534 return vmf->vma->vm_ops->huge_fault(vmf); 3534 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
3535#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3535#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3536 return VM_FAULT_FALLBACK; 3536 return VM_FAULT_FALLBACK;
3537} 3537}
@@ -3659,7 +3659,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3659 if (!vmf.pud) 3659 if (!vmf.pud)
3660 return VM_FAULT_OOM; 3660 return VM_FAULT_OOM;
3661 if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { 3661 if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
3662 vmf.flags |= FAULT_FLAG_SIZE_PUD;
3663 ret = create_huge_pud(&vmf); 3662 ret = create_huge_pud(&vmf);
3664 if (!(ret & VM_FAULT_FALLBACK)) 3663 if (!(ret & VM_FAULT_FALLBACK))
3665 return ret; 3664 return ret;
@@ -3670,8 +3669,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3670 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 3669 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
3671 unsigned int dirty = flags & FAULT_FLAG_WRITE; 3670 unsigned int dirty = flags & FAULT_FLAG_WRITE;
3672 3671
3673 vmf.flags |= FAULT_FLAG_SIZE_PUD;
3674
3675 /* NUMA case for anonymous PUDs would go here */ 3672 /* NUMA case for anonymous PUDs would go here */
3676 3673
3677 if (dirty && !pud_write(orig_pud)) { 3674 if (dirty && !pud_write(orig_pud)) {
@@ -3689,18 +3686,14 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3689 if (!vmf.pmd) 3686 if (!vmf.pmd)
3690 return VM_FAULT_OOM; 3687 return VM_FAULT_OOM;
3691 if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { 3688 if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
3692 vmf.flags |= FAULT_FLAG_SIZE_PMD;
3693 ret = create_huge_pmd(&vmf); 3689 ret = create_huge_pmd(&vmf);
3694 if (!(ret & VM_FAULT_FALLBACK)) 3690 if (!(ret & VM_FAULT_FALLBACK))
3695 return ret; 3691 return ret;
3696 /* fall through path, remove PMD flag */
3697 vmf.flags &= ~FAULT_FLAG_SIZE_PMD;
3698 } else { 3692 } else {
3699 pmd_t orig_pmd = *vmf.pmd; 3693 pmd_t orig_pmd = *vmf.pmd;
3700 3694
3701 barrier(); 3695 barrier();
3702 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { 3696 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
3703 vmf.flags |= FAULT_FLAG_SIZE_PMD;
3704 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) 3697 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
3705 return do_huge_pmd_numa_page(&vmf, orig_pmd); 3698 return do_huge_pmd_numa_page(&vmf, orig_pmd);
3706 3699
@@ -3709,8 +3702,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3709 ret = wp_huge_pmd(&vmf, orig_pmd); 3702 ret = wp_huge_pmd(&vmf, orig_pmd);
3710 if (!(ret & VM_FAULT_FALLBACK)) 3703 if (!(ret & VM_FAULT_FALLBACK))
3711 return ret; 3704 return ret;
3712 /* fall through path, remove PUD flag */
3713 vmf.flags &= ~FAULT_FLAG_SIZE_PUD;
3714 } else { 3705 } else {
3715 huge_pmd_set_accessed(&vmf, orig_pmd); 3706 huge_pmd_set_accessed(&vmf, orig_pmd);
3716 return 0; 3707 return 0;