author		Matthew Wilcox <willy@linux.intel.com>		2015-09-08 17:58:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 18:35:28 -0400
commit		b96375f74a6d4f39fc6cbdc0bce5175115c7f96f (patch)
tree		cadd8fed9bf3c44ed91e435d0099151767d9b34d /mm/memory.c
parent		4897c7655d9419ba7e62bac145ec6a1847134d93 (diff)
mm: add a pmd_fault handler
Allow non-anonymous VMAs to provide huge pages in response to a page fault.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
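For reference, the shape of the new callback is visible at the call sites in the diff below: a pmd_fault handler receives the VMA, the faulting address, the PMD slot, and the fault flags, and returns VM_FAULT_* bits. A minimal sketch of how a driver might supply one (the example_* names and the policy checks are illustrative assumptions, not part of this commit; PMD_MASK, PMD_SIZE and the VM_FAULT_* constants are existing kernel definitions):

	static int example_pmd_fault(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmd,
				     unsigned int flags)
	{
		unsigned long haddr = address & PMD_MASK;

		/*
		 * Decline unless a full, PMD-aligned range fits inside the
		 * VMA; VM_FAULT_FALLBACK tells the core to retry the fault
		 * with PTE-sized mappings instead.
		 */
		if (haddr < vma->vm_start || haddr + PMD_SIZE > vma->vm_end)
			return VM_FAULT_FALLBACK;

		/* Hypothetical helper that installs a huge mapping at *pmd;
		 * assume nonzero means it could not. */
		if (example_insert_huge_page(vma, haddr, pmd, flags))
			return VM_FAULT_FALLBACK;

		return VM_FAULT_NOPAGE;	/* handler installed the mapping */
	}

	static const struct vm_operations_struct example_vm_ops = {
		.fault	   = example_fault,	/* existing 4k path */
		.pmd_fault = example_pmd_fault,	/* new huge path */
	};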
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	30	++++++++++++++++++++++++------
1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 882c9d7ae2f5..a3f9a8ccec0f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3232,6 +3232,27 @@ out:
 	return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+			unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3334,10 +3355,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = VM_FAULT_FALLBACK;
-		if (!vma->vm_ops)
-			ret = do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
@@ -3361,8 +3379,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			orig_pmd, pmd);
 
 		if (dirty && !pmd_write(orig_pmd)) {
-			ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-						  orig_pmd);
+			ret = wp_huge_pmd(mm, vma, address, pmd,
+					  orig_pmd, flags);
 			if (!(ret & VM_FAULT_FALLBACK))
 				return ret;
 		} else {
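A note on the second helper (an inference from the diff above, with an illustrative fragment): wp_huge_pmd routes write faults against an existing read-only huge PMD through the same pmd_fault callback rather than a dedicated write hook, so a handler distinguishes the two cases by its flags argument. FAULT_FLAG_WRITE, VM_FAULT_NOPAGE and VM_FAULT_FALLBACK are existing kernel constants; the helper below is hypothetical:

	/* Fragment inside a pmd_fault handler. */
	if (flags & FAULT_FLAG_WRITE) {
		/*
		 * Reached via wp_huge_pmd(): try to make the existing huge
		 * mapping writable in place; otherwise ask the core to fall
		 * back to PTE-sized handling.
		 */
		if (example_make_pmd_writable(vma, address, pmd))	/* nonzero = failure */
			return VM_FAULT_FALLBACK;
		return VM_FAULT_NOPAGE;
	}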