Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	117
1 files changed, 112 insertions, 5 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 92a3ebd8d795..601159a46ab6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2256,6 +2256,54 @@ oom:
 }
 
 /*
+ * do_no_pfn() tries to create a new page mapping for a page without
+ * a struct_page backing it
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+ *
+ * It is expected that the ->nopfn handler always returns the same pfn
+ * for a given virtual mapping.
+ *
+ * Mark this `noinline' to prevent it from bloating the main pagefault code.
+ */
+static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
+		     unsigned long address, pte_t *page_table, pmd_t *pmd,
+		     int write_access)
+{
+	spinlock_t *ptl;
+	pte_t entry;
+	unsigned long pfn;
+	int ret = VM_FAULT_MINOR;
+
+	pte_unmap(page_table);
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+	BUG_ON(is_cow_mapping(vma->vm_flags));
+
+	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
+	if (pfn == NOPFN_OOM)
+		return VM_FAULT_OOM;
+	if (pfn == NOPFN_SIGBUS)
+		return VM_FAULT_SIGBUS;
+
+	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+	/* Only go through if we didn't race with anybody else... */
+	if (pte_none(*page_table)) {
+		entry = pfn_pte(pfn, vma->vm_page_prot);
+		if (write_access)
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		set_pte_at(mm, address, page_table, entry);
+	}
+	pte_unmap_unlock(page_table, ptl);
+	return ret;
+}
+
+/*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
  * nonlinear vmas.
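For context: the new ->nopfn path is only taken for VM_PFNMAP vmas whose driver supplies a handler translating a faulting address into a page frame number. The fragment below is an illustrative sketch, not part of this patch: my_dev, my_dev_nopfn and my_dev_mmap are invented names, and it assumes the driver's mmap handler stashes a per-device structure in vm_private_data. It targets the ->nopfn hook introduced alongside this change, which returns a pfn or NOPFN_OOM/NOPFN_SIGBUS.

/*
 * Illustrative sketch only (not from this commit): a hypothetical
 * driver wiring up the new ->nopfn hook for a VM_PFNMAP mapping.
 */
struct my_dev {
	unsigned long base_pfn;		/* first pfn of the device aperture */
	unsigned long nr_pages;		/* aperture size in pages */
};

static unsigned long my_dev_nopfn(struct vm_area_struct *vma,
				  unsigned long address)
{
	struct my_dev *dev = vma->vm_private_data;
	unsigned long pgoff;

	/* page index of the fault within the mapping, including vm_pgoff */
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff >= dev->nr_pages)
		return NOPFN_SIGBUS;	/* fault beyond the aperture */

	/* must be stable: the same address always yields the same pfn */
	return dev->base_pfn + pgoff;
}

static struct vm_operations_struct my_dev_vm_ops = {
	.nopfn	= my_dev_nopfn,
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_private_data = file->private_data;	/* assumed to hold a struct my_dev */
	vma->vm_flags |= VM_PFNMAP;			/* do_no_pfn() BUG_ONs without this */
	vma->vm_ops = &my_dev_vm_ops;
	return 0;
}

As the comment above do_no_pfn() requires, the handler must return a stable pfn for a given address; mappings that need per-fault decisions beyond that belong with a ->nopage implementation returning a struct page.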
| @@ -2317,11 +2365,17 @@ static inline int handle_pte_fault(struct mm_struct *mm, | |||
| 2317 | old_entry = entry = *pte; | 2365 | old_entry = entry = *pte; |
| 2318 | if (!pte_present(entry)) { | 2366 | if (!pte_present(entry)) { |
| 2319 | if (pte_none(entry)) { | 2367 | if (pte_none(entry)) { |
| 2320 | if (!vma->vm_ops || !vma->vm_ops->nopage) | 2368 | if (vma->vm_ops) { |
| 2321 | return do_anonymous_page(mm, vma, address, | 2369 | if (vma->vm_ops->nopage) |
| 2322 | pte, pmd, write_access); | 2370 | return do_no_page(mm, vma, address, |
| 2323 | return do_no_page(mm, vma, address, | 2371 | pte, pmd, |
| 2324 | pte, pmd, write_access); | 2372 | write_access); |
| 2373 | if (unlikely(vma->vm_ops->nopfn)) | ||
| 2374 | return do_no_pfn(mm, vma, address, pte, | ||
| 2375 | pmd, write_access); | ||
| 2376 | } | ||
| 2377 | return do_anonymous_page(mm, vma, address, | ||
| 2378 | pte, pmd, write_access); | ||
| 2325 | } | 2379 | } |
| 2326 | if (pte_file(entry)) | 2380 | if (pte_file(entry)) |
| 2327 | return do_file_page(mm, vma, address, | 2381 | return do_file_page(mm, vma, address, |
| @@ -2550,3 +2604,56 @@ int in_gate_area_no_task(unsigned long addr) | |||
| 2550 | } | 2604 | } |
| 2551 | 2605 | ||
| 2552 | #endif /* __HAVE_ARCH_GATE_AREA */ | 2606 | #endif /* __HAVE_ARCH_GATE_AREA */ |
| 2607 | |||
| 2608 | /* | ||
| 2609 | * Access another process' address space. | ||
| 2610 | * Source/target buffer must be kernel space, | ||
| 2611 | * Do not walk the page table directly, use get_user_pages | ||
| 2612 | */ | ||
| 2613 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) | ||
| 2614 | { | ||
| 2615 | struct mm_struct *mm; | ||
| 2616 | struct vm_area_struct *vma; | ||
| 2617 | struct page *page; | ||
| 2618 | void *old_buf = buf; | ||
| 2619 | |||
| 2620 | mm = get_task_mm(tsk); | ||
| 2621 | if (!mm) | ||
| 2622 | return 0; | ||
| 2623 | |||
| 2624 | down_read(&mm->mmap_sem); | ||
| 2625 | /* ignore errors, just check how much was sucessfully transfered */ | ||
| 2626 | while (len) { | ||
| 2627 | int bytes, ret, offset; | ||
| 2628 | void *maddr; | ||
| 2629 | |||
| 2630 | ret = get_user_pages(tsk, mm, addr, 1, | ||
| 2631 | write, 1, &page, &vma); | ||
| 2632 | if (ret <= 0) | ||
| 2633 | break; | ||
| 2634 | |||
| 2635 | bytes = len; | ||
| 2636 | offset = addr & (PAGE_SIZE-1); | ||
| 2637 | if (bytes > PAGE_SIZE-offset) | ||
| 2638 | bytes = PAGE_SIZE-offset; | ||
| 2639 | |||
| 2640 | maddr = kmap(page); | ||
| 2641 | if (write) { | ||
| 2642 | copy_to_user_page(vma, page, addr, | ||
| 2643 | maddr + offset, buf, bytes); | ||
| 2644 | set_page_dirty_lock(page); | ||
| 2645 | } else { | ||
| 2646 | copy_from_user_page(vma, page, addr, | ||
| 2647 | buf, maddr + offset, bytes); | ||
| 2648 | } | ||
| 2649 | kunmap(page); | ||
| 2650 | page_cache_release(page); | ||
| 2651 | len -= bytes; | ||
| 2652 | buf += bytes; | ||
| 2653 | addr += bytes; | ||
| 2654 | } | ||
| 2655 | up_read(&mm->mmap_sem); | ||
| 2656 | mmput(mm); | ||
| 2657 | |||
| 2658 | return buf - old_buf; | ||
| 2659 | } | ||
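The newly added access_process_vm() is intended for callers such as ptrace-style debuggers that need to read or write another task's memory without walking its page tables by hand. The fragment below is a hedged usage sketch, not code from this patch: peek_remote and its error policy are invented for illustration.

/*
 * Illustrative sketch only (not from this commit): a hypothetical
 * in-kernel helper copying 'len' bytes from another task's address
 * space into a kernel buffer, in the style of the ptrace peek path.
 */
static int peek_remote(struct task_struct *child, unsigned long addr,
		       void *kbuf, int len)
{
	int copied;

	/* write = 0: read from 'addr' in child's mm into kbuf */
	copied = access_process_vm(child, addr, kbuf, len, 0);
	if (copied != len)
		return -EIO;	/* short or failed transfer */
	return 0;
}

Because access_process_vm() returns the number of bytes actually transferred rather than an error code, callers have to treat a short return as a fault in the target mapping and decide their own error policy.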
