Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 5c694f2b9c12..9bdbd10cb418 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -108,7 +108,7 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
-static unsigned long zero_pfn __read_mostly;
+unsigned long zero_pfn __read_mostly;
 
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
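zero_pfn is made non-static so that an architecture overriding the helpers added in the next hunk can refer to it from its own headers. Such a header would carry its own declaration, something like this (a sketch, not part of the patch; the initcall detail is assumed):

	extern unsigned long zero_pfn;	/* pfn of ZERO_PAGE(0), set early in mm/memory.c */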
@@ -455,6 +455,20 @@ static inline int is_cow_mapping(unsigned int flags)
 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+#ifndef is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+	return pfn == zero_pfn;
+}
+#endif
+
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+	return zero_pfn;
+}
+#endif
+
 /*
  * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
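The #ifndef guards turn the two helpers into overridable defaults: an architecture with aliasing data caches, which therefore keeps a set of differently coloured zero pages (MIPS is the motivating case), can define both macros in its pgtable.h and these generic versions drop out. An override might look roughly like the sketch below; zero_page_mask and the range arithmetic are illustrative assumptions, not code from this patch:

	/* arch pgtable.h, sketch only: zero pages at pfns zero_pfn .. zero_pfn + N - 1 */
	#define is_zero_pfn is_zero_pfn
	static inline int is_zero_pfn(unsigned long pfn)
	{
		extern unsigned long zero_pfn;
		return pfn - zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
	}

	/* pick the zero page whose cache colour matches the faulting address */
	#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))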
@@ -512,7 +526,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
-		if (pfn != zero_pfn)
+		if (!is_zero_pfn(pfn))
 			print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
@@ -534,6 +548,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
+	if (is_zero_pfn(pfn))
+		return NULL;
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
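Taken together, the two vm_normal_page() hunks make the zero page "not normal" on every architecture, with or without pte_special: the HAVE_PTE_SPECIAL branch already returned NULL for it, and the new check covers the pfn_valid()-based VM_MIXEDMAP scheme, so no pte walker ever takes a reference on it. The contract callers rely on, sketched rather than quoted:

	struct page *page = vm_normal_page(vma, addr, pte);
	if (!page)
		continue;	/* zero page or raw pfn mapping: nothing to pin or count */
	get_page(page);		/* only refcountable pages reach this point */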
@@ -1161,7 +1177,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	page = vm_normal_page(vma, address, pte);
 	if (unlikely(!page)) {
 		if ((flags & FOLL_DUMP) ||
-		    pte_pfn(pte) != zero_pfn)
+		    !is_zero_pfn(pte_pfn(pte)))
 			goto bad_page;
 		page = pte_page(pte);
 	}
@@ -1443,10 +1459,6 @@ struct page *get_dump_page(unsigned long addr)
 	if (__get_user_pages(current, current->mm, addr, 1,
 			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
 		return NULL;
-	if (page == ZERO_PAGE(0)) {
-		page_cache_release(page);
-		return NULL;
-	}
 	flush_cache_page(vma, addr, page_to_pfn(page));
 	return page;
 }
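The ZERO_PAGE(0) test is dead code now: get_dump_page() passes FOLL_DUMP, and with the follow_page() change above FOLL_DUMP refuses the zero page, so __get_user_pages() comes back with less than 1 and we return NULL before ever reaching it. It was also subtly too narrow, comparing against the single global zero page while my_zero_pfn() may now hand out several. The flow, as a comment sketch rather than verbatim kernel code:

	/* zero-page pte under get_dump_page():
	 *   follow_page(): vm_normal_page() == NULL and FOLL_DUMP set
	 *     -> goto bad_page, the lookup fails
	 *   __get_user_pages() < 1 -> get_dump_page() returns NULL above
	 */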
@@ -1629,7 +1641,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 	 * If we don't have pte special, then we have to use the pfn_valid()
 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
 	 * refcount the page if pfn_valid is true (hence insert_page rather
-	 * than insert_pfn).
+	 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
+	 * without pte special, it would there be refcounted as a normal page.
 	 */
 	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
 		struct page *page;
@@ -2097,7 +2110,7 @@ gotten:
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 
-	if (pte_pfn(orig_pte) == zero_pfn) {
+	if (is_zero_pfn(pte_pfn(orig_pte))) {
 		new_page = alloc_zeroed_user_highpage_movable(vma, address);
 		if (!new_page)
 			goto oom;
@@ -2658,8 +2671,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (HAVE_PTE_SPECIAL && !(flags & FAULT_FLAG_WRITE)) {
-		entry = pte_mkspecial(pfn_pte(zero_pfn, vma->vm_page_prot));
+	if (!(flags & FAULT_FLAG_WRITE)) {
+		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+						vma->vm_page_prot));
 		ptl = pte_lockptr(mm, pmd);
 		spin_lock(ptl);
 		if (!pte_none(*page_table))
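With the HAVE_PTE_SPECIAL condition gone, a read fault on anonymous memory installs a zero-page pte on every architecture, and nothing is charged to the process: the zero-page path takes no page reference and skips the anon RSS accounting done on the normal allocation path. A small userspace check makes this visible; it is an illustration written for this note (it assumes a kernel with this patch and a VmRSS line in /proc/self/status), not code from the patch:

	#include <stdio.h>
	#include <sys/mman.h>

	static long rss_kb(void)
	{
		char line[128];
		long kb = 0;
		FILE *f = fopen("/proc/self/status", "r");

		while (f && fgets(line, sizeof line, f))
			if (sscanf(line, "VmRSS: %ld", &kb) == 1)
				break;
		if (f)
			fclose(f);
		return kb;
	}

	int main(void)
	{
		size_t i, len = 64UL << 20;	/* 64MB of anonymous memory */
		volatile char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		long base, sink = 0;

		if (p == MAP_FAILED)
			return 1;
		base = rss_kb();
		for (i = 0; i < len; i += 4096)
			sink += p[i];	/* read faults: zero page, RSS stays flat */
		printf("after reads:  +%ld kB RSS (sink %ld)\n", rss_kb() - base, sink);
		for (i = 0; i < len; i += 4096)
			p[i] = 1;	/* write faults: real pages, RSS jumps */
		printf("after writes: +%ld kB RSS\n", rss_kb() - base);
		return 0;
	}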