Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	63
1 file changed, 41 insertions, 22 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 107b619cfb16..3666a4c6dd22 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -350,6 +350,22 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
 }
 
 /*
+ * page_is_anon applies strict checks for an anonymous page belonging to
+ * this vma at this address.  It is used on VM_UNPAGED vmas, which are
+ * usually populated with shared originals (which must not be counted),
+ * but occasionally contain private COWed copies (when !VM_SHARED, or
+ * perhaps via ptrace when VM_SHARED).  An mmap of /dev/mem might window
+ * free pages, pages from other processes, or from other parts of this:
+ * it's tricky, but try not to be deceived by foreign anonymous pages.
+ */
+static inline int page_is_anon(struct page *page,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	return page && PageAnon(page) && page_mapped(page) &&
+		page_address_in_vma(page, vma) == addr;
+}
+
+/*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
  * covered by this vma.
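
The /dev/mem scenario in the comment is easiest to picture from the user side.
Below is a minimal userspace sketch (assuming an unrestricted /dev/mem; the
offset is purely illustrative) of how such a VM_UNPAGED mapping can window
foreign pages - whatever physically sits at that offset, possibly another
process's anonymous memory - while a private write still plants a genuine
anonymous COW copy that page_is_anon() must recognize:

/* Userspace illustration only; not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/dev/mem", O_RDWR);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* Whatever physical page happens to live at 1MiB: it may be free,
	 * or belong to another process.  PageAnon() alone says nothing
	 * about whether it belongs to this mapping. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE, fd, 1 << 20);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* A private write COWs a real anonymous page into the vma: the one
	 * case page_is_anon() must accept, so it gets counted and freed. */
	p[0] = 1;
	return 0;
}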
@@ -381,23 +397,22 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out_set_pte;
 	}
 
-	/* If the region is VM_UNPAGED, the mapping is not
-	 * mapped via rmap - duplicate the pte as is.
-	 */
-	if (vm_flags & VM_UNPAGED)
-		goto out_set_pte;
-
 	pfn = pte_pfn(pte);
-	/* If the pte points outside of valid memory but
+	page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
+
+	if (unlikely(vm_flags & VM_UNPAGED))
+		if (!page_is_anon(page, vma, addr))
+			goto out_set_pte;
+
+	/*
+	 * If the pte points outside of valid memory but
 	 * the region is not VM_UNPAGED, we have a problem.
 	 */
-	if (unlikely(!pfn_valid(pfn))) {
+	if (unlikely(!page)) {
 		print_bad_pte(vma, pte, addr);
 		goto out_set_pte; /* try to do something sane */
 	}
 
-	page = pfn_to_page(pfn);
-
 	/*
 	 * If it's a COW mapping, write protect it both
 	 * in the parent and the child
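
Both copy_one_pte() above and zap_pte_range() below now open-code the same
lookup: take the pfn out of the pte and convert it to a struct page only when
pfn_valid() confirms there is a mem_map entry for it, otherwise keep NULL.  A
hypothetical helper (the name is invented here, it is not part of the patch)
makes the idiom explicit:

/*
 * Illustration only: the pfn -> struct page lookup repeated in
 * copy_one_pte() and zap_pte_range().
 */
static inline struct page *pte_page_or_null(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/* pfns without a struct page (e.g. raw MMIO mappings) yield NULL. */
	return pfn_valid(pfn) ? pfn_to_page(pfn) : NULL;
}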
@@ -568,17 +583,20 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			continue;
 		}
 		if (pte_present(ptent)) {
-			struct page *page = NULL;
+			struct page *page;
+			unsigned long pfn;
 
 			(*zap_work) -= PAGE_SIZE;
 
-			if (!(vma->vm_flags & VM_UNPAGED)) {
-				unsigned long pfn = pte_pfn(ptent);
-				if (unlikely(!pfn_valid(pfn)))
-					print_bad_pte(vma, ptent, addr);
-				else
-					page = pfn_to_page(pfn);
-			}
+			pfn = pte_pfn(ptent);
+			page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
+
+			if (unlikely(vma->vm_flags & VM_UNPAGED)) {
+				if (!page_is_anon(page, vma, addr))
+					page = NULL;
+			} else if (unlikely(!page))
+				print_bad_pte(vma, ptent, addr);
+
 			if (unlikely(details) && page) {
 				/*
 				 * unmap_shared_mapping_pages() wants to
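
The rewritten zap logic makes a three-way decision that is easy to lose in the
diff.  Pulled out as a standalone sketch (invented name, not code from the
patch), it reads:

/*
 * Sketch only: which page, if any, zap_pte_range() accounts and frees.
 */
static struct page *zap_candidate(struct vm_area_struct *vma, pte_t ptent,
		unsigned long addr, struct page *page)
{
	if (unlikely(vma->vm_flags & VM_UNPAGED)) {
		/* VM_UNPAGED: shared originals and foreign pages are left
		 * untouched; only this vma's own anon COW copies count. */
		if (!page_is_anon(page, vma, addr))
			return NULL;
	} else if (unlikely(!page)) {
		/* Ordinary vma whose pte points at invalid memory: report
		 * it, then keep zapping as best we can. */
		print_bad_pte(vma, ptent, addr);
	}
	return page;
}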
@@ -1295,10 +1313,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	old_page = pfn_to_page(pfn);
 	src_page = old_page;
 
-	if (unlikely(vma->vm_flags & VM_UNPAGED)) {
-		old_page = NULL;
-		goto gotten;
-	}
+	if (unlikely(vma->vm_flags & VM_UNPAGED))
+		if (!page_is_anon(old_page, vma, address)) {
+			old_page = NULL;
+			goto gotten;
+		}
 
 	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
 		int reuse = can_share_swap_page(old_page);
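
The do_wp_page() change means a write fault in a VM_UNPAGED vma no longer
always takes the "gotten" path: if old_page is already this vma's own
anonymous COW copy, control now falls through to the normal PageAnon()
handling, where can_share_swap_page() may let the page be reused in place.
The excerpt below is the hunk above with explanatory comments added; the
comments are mine, not the patch author's:

	if (unlikely(vma->vm_flags & VM_UNPAGED))
		if (!page_is_anon(old_page, vma, address)) {
			/* Shared original or foreign page: never write it in
			 * place; forget it and COW a fresh anon page. */
			old_page = NULL;
			goto gotten;
		}
	/* Otherwise old_page is either a page of an ordinary vma or our own
	 * anon copy: the usual reuse-or-copy logic below applies. */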