Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   67
1 files changed, 7 insertions, 60 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 2302d228fe04..46dbed4b7446 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1058,11 +1058,9 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
 		return 0;
 	/*
-	 * And if we have a fault or a nopfn routine, it's not an
-	 * anonymous region.
+	 * And if we have a fault routine, it's not an anonymous region.
 	 */
-	return !vma->vm_ops ||
-		(!vma->vm_ops->fault && !vma->vm_ops->nopfn);
+	return !vma->vm_ops || !vma->vm_ops->fault;
 }
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
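With the nopfn hook gone, use_zero_page() decides whether a read fault may reuse the shared zero page purely from the presence of a .fault callback: any VMA whose vm_ops provides one is driver- or file-backed, not anonymous. As a rough illustration (hypothetical names, not part of this patch), a VMA set up like the following is now excluded because vma->vm_ops->fault is non-NULL:

#include <linux/mm.h>

/* Hypothetical driver vm_ops; the handler body is a placeholder. */
static int my_drv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct my_drv_vm_ops = {
	.fault	= my_drv_fault,	/* presence alone makes use_zero_page() return 0 */
};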
@@ -1338,6 +1336,11 @@ out:
  *
  * This function should only be called from a vm_ops->fault handler, and
  * in that case the handler should return NULL.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
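The two comment lines added here are carried over from the do_no_pfn() documentation removed below, since vm_insert_pfn() from a ->fault handler is the path drivers use instead of ->nopfn. A minimal sketch of such a handler might look like this (my_dev_base_pfn() is an assumed helper and the error handling is simplified; handlers of this era return VM_FAULT_NOPAGE after inserting the pte themselves):

#include <linux/mm.h>

static int my_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = my_dev_base_pfn(vma) + vmf->pgoff;	/* assumed helper */

	/* The VMA must be VM_PFNMAP and not COW, per the rules documented above. */
	if (vm_insert_pfn(vma, addr, pfn))
		return VM_FAULT_SIGBUS;	/* simplified: -ENOMEM could map to VM_FAULT_OOM */
	return VM_FAULT_NOPAGE;	/* pte is installed; there is no struct page to return */
}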
@@ -2501,59 +2504,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
-
-/*
- * do_no_pfn() tries to create a new page mapping for a page without
- * a struct_page backing it
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
- *
- * It is expected that the ->nopfn handler always returns the same pfn
- * for a given virtual mapping.
- *
- * Mark this `noinline' to prevent it from bloating the main pagefault code.
- */
-static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
-		     unsigned long address, pte_t *page_table, pmd_t *pmd,
-		     int write_access)
-{
-	spinlock_t *ptl;
-	pte_t entry;
-	unsigned long pfn;
-
-	pte_unmap(page_table);
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
-	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-
-	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-
-	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
-
-	if (unlikely(pfn == NOPFN_OOM))
-		return VM_FAULT_OOM;
-	else if (unlikely(pfn == NOPFN_SIGBUS))
-		return VM_FAULT_SIGBUS;
-	else if (unlikely(pfn == NOPFN_REFAULT))
-		return 0;
-
-	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-	/* Only go through if we didn't race with anybody else... */
-	if (pte_none(*page_table)) {
-		entry = pfn_pte(pfn, vma->vm_page_prot);
-		if (write_access)
-			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		set_pte_at(mm, address, page_table, entry);
-	}
-	pte_unmap_unlock(page_table, ptl);
-	return 0;
-}
-
 /*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
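For drivers being converted off ->nopfn, the logic deleted above maps onto ->fault fairly mechanically: NOPFN_OOM and NOPFN_SIGBUS become VM_FAULT_OOM and VM_FAULT_SIGBUS, and a successfully returned pfn becomes a vm_insert_pfn() call followed by VM_FAULT_NOPAGE. A before/after sketch (my_region_valid() and my_lookup_pfn() are assumed helpers, not kernel APIs):

#include <linux/mm.h>

/* Before this patch: a ->nopfn handler returned a bare pfn or a NOPFN_* code. */
static unsigned long my_old_nopfn(struct vm_area_struct *vma,
				  unsigned long address)
{
	if (!my_region_valid(vma, address))
		return NOPFN_SIGBUS;
	return my_lookup_pfn(vma, address);
}

/* After: the same logic as a ->fault handler that installs the pte itself. */
static int my_new_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (!my_region_valid(vma, address))
		return VM_FAULT_SIGBUS;
	if (vm_insert_pfn(vma, address, my_lookup_pfn(vma, address)))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}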
@@ -2614,9 +2564,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 				if (likely(vma->vm_ops->fault))
 					return do_linear_fault(mm, vma, address,
 						pte, pmd, write_access, entry);
-				if (unlikely(vma->vm_ops->nopfn))
-					return do_no_pfn(mm, vma, address, pte,
-							 pmd, write_access);
 			}
 			return do_anonymous_page(mm, vma, address,
 						 pte, pmd, write_access);