-rw-r--r--	mm/memory.c	7
-rw-r--r--	mm/swapfile.c	7
2 files changed, 7 insertions, 7 deletions
diff --git a/mm/memory.c b/mm/memory.c
index fd5d4c6dc762..13667681cd16 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1768,13 +1768,14 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr)
 {
 	pte_t entry;
-	struct page * page = ZERO_PAGE(addr);
 
-	/* Read-only mapping of ZERO_PAGE. */
-	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+	/* Mapping of ZERO_PAGE - vm_page_prot is readonly */
+	entry = mk_pte(ZERO_PAGE(addr), vma->vm_page_prot);
 
 	/* ..except if it's a write access */
 	if (write_access) {
+		struct page *page;
+
 		/* Allocate our own private page. */
 		pte_unmap(page_table);
 		spin_unlock(&mm->page_table_lock);
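The mm/memory.c hunk relies on the point spelled out in the new swapfile.c comment: for a private (non-VM_SHARED) vma, vm_page_prot already omits write permission, so mk_pte() produces a write-protected PTE and the explicit pte_wrprotect() was redundant. A minimal standalone sketch of that idea, using simplified stand-in names rather than the kernel's real protection_map[] machinery:

/* Standalone illustration, not kernel code: a simplified stand-in for
 * the lookup that fills vma->vm_page_prot from the vma's flags. */
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE  0x1u
#define VM_SHARED 0x2u

/* true = the resulting page protection carries hardware write permission */
static const bool prot_writable[4] = {
	[0]                    = false,	/* private, read-only            */
	[VM_WRITE]             = false,	/* private, writable: COW, so RO */
	[VM_SHARED]            = false,	/* shared,  read-only            */
	[VM_SHARED | VM_WRITE] = true,	/* shared,  writable             */
};

int main(void)
{
	unsigned int vm_flags = VM_WRITE;	/* a private, writable vma */

	/* A PTE built from this protection (as mk_pte() builds one from
	 * vm_page_prot) is already read-only, so pte_wrprotect() adds
	 * nothing; the first write faults into do_wp_page() instead. */
	printf("pte writable: %s\n",
	       prot_writable[vm_flags & 3u] ? "yes" : "no");
	return 0;
}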
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1dcaeda039f4..05c851291241 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -398,10 +398,9 @@ void free_swap_and_cache(swp_entry_t entry)
 }
 
 /*
- * Always set the resulting pte to be nowrite (the same as COW pages
- * after one process has exited). We don't know just how many PTEs will
- * share this swap entry, so be cautious and let do_wp_page work out
- * what to do if a write is requested later.
+ * No need to decide whether this PTE shares the swap entry with others,
+ * just let do_wp_page work it out if a write is requested later - to
+ * force COW, vm_page_prot omits write permission from any private vma.
  *
  * vma->vm_mm->page_table_lock is held.
  */
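The rewritten swapfile.c comment makes the matching point for swapped-in pages: the caller no longer reasons about how many PTEs share the swap entry, because the PTE it installs follows vm_page_prot and is therefore read-only in any private vma; the sharing question is deferred to the first write fault. A rough standalone model of the decision do_wp_page then takes (assumed names and simplified logic, not the real function):

/* Minimal model, not kernel code: on a write fault to a read-only
 * private PTE, reuse the page if we are its only user, else copy. */
#include <stdio.h>

struct fake_page { int users; };	/* stand-in for the page's use count */

static const char *write_fault(const struct fake_page *page)
{
	if (page->users == 1)
		return "reuse page, just make the PTE writable";
	return "copy the page (COW), map the private copy writable";
}

int main(void)
{
	struct fake_page shared = { .users = 3 }, exclusive = { .users = 1 };

	printf("shared swap page:    %s\n", write_fault(&shared));
	printf("exclusive swap page: %s\n", write_fault(&exclusive));
	return 0;
}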