Diffstat (limited to 'mm/memory.c')
 -rw-r--r--  mm/memory.c | 64
 1 file changed, 59 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 92a3ebd8d795..f2ef1dcfff77 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2256,6 +2256,54 @@ oom:
 }
 
 /*
+ * do_no_pfn() tries to create a new page mapping for a page without
+ * a struct page backing it
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+ *
+ * It is expected that the ->nopfn handler always returns the same pfn
+ * for a given virtual mapping.
+ *
+ * Mark this `noinline' to prevent it from bloating the main pagefault code.
+ */
+static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
+		     unsigned long address, pte_t *page_table, pmd_t *pmd,
+		     int write_access)
+{
+	spinlock_t *ptl;
+	pte_t entry;
+	unsigned long pfn;
+	int ret = VM_FAULT_MINOR;
+
+	pte_unmap(page_table);
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+	BUG_ON(is_cow_mapping(vma->vm_flags));
+
+	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
+	if (pfn == NOPFN_OOM)
+		return VM_FAULT_OOM;
+	if (pfn == NOPFN_SIGBUS)
+		return VM_FAULT_SIGBUS;
+
+	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+	/* Only go through if we didn't race with anybody else... */
+	if (pte_none(*page_table)) {
+		entry = pfn_pte(pfn, vma->vm_page_prot);
+		if (write_access)
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		set_pte_at(mm, address, page_table, entry);
+	}
+	pte_unmap_unlock(page_table, ptl);
+	return ret;
+}
+
+/*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
  * nonlinear vmas.
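
For readers unfamiliar with the hook that do_no_pfn() dispatches to, this is roughly what the driver side might look like. It is a minimal sketch, not code from this patch: the mydev names and device layout are invented, and NOPFN_OOM/NOPFN_SIGBUS come from the include/linux/mm.h half of this patch series. The key contract, per the comment block above, is that the handler must return the same pfn for a given address on every call.

#include <linux/mm.h>

/* Hypothetical device state (an assumption, not part of the patch). */
struct mydev {
	unsigned long region_phys;	/* physical base of device memory */
	unsigned long region_size;	/* region size in bytes */
};

/*
 * Hypothetical ->nopfn handler: translate the faulting address into a
 * raw pfn for device memory that has no struct page behind it.
 */
static unsigned long mydev_nopfn(struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long offset = address - vma->vm_start;

	if (offset >= dev->region_size)
		return NOPFN_SIGBUS;	/* fault beyond the region */

	/* Stable mapping: same address must always yield the same pfn. */
	return (dev->region_phys + offset) >> PAGE_SHIFT;
}

static struct vm_operations_struct mydev_vm_ops = {
	.nopfn	= mydev_nopfn,
};
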
@@ -2317,11 +2365,17 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	old_entry = entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (!vma->vm_ops || !vma->vm_ops->nopage)
-				return do_anonymous_page(mm, vma, address,
-					pte, pmd, write_access);
-			return do_no_page(mm, vma, address,
-					pte, pmd, write_access);
+			if (vma->vm_ops) {
+				if (vma->vm_ops->nopage)
+					return do_no_page(mm, vma, address,
+							  pte, pmd,
+							  write_access);
+				if (unlikely(vma->vm_ops->nopfn))
+					return do_no_pfn(mm, vma, address, pte,
+							 pmd, write_access);
+			}
+			return do_anonymous_page(mm, vma, address,
+						 pte, pmd, write_access);
 		}
 		if (pte_file(entry))
 			return do_file_page(mm, vma, address,
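
Two details of the dispatch above are worth noting: ->nopage still wins if a vma provides both handlers, and the ->nopfn test is wrapped in unlikely() since few vmas are expected to carry one. To reach do_no_pfn() at all, a driver's mmap must satisfy its two BUG_ON()s: VM_PFNMAP set, and no copy-on-write mapping. A hypothetical mmap hook wiring up the mydev_vm_ops sketch above (again an assumption, not part of this patch) could look like:

#include <linux/fs.h>
#include <linux/mm.h>

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * do_no_pfn() BUG()s on copy-on-write mappings, so the simplest
	 * safe policy is to refuse anything that is not MAP_SHARED.
	 */
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_PFNMAP;	/* pages have no struct page */
	vma->vm_ops = &mydev_vm_ops;
	vma->vm_private_data = file->private_data;
	return 0;
}
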