Diffstat (limited to 'mm/memory.c')

 -rw-r--r--	mm/memory.c | 44 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index bde42c6d3633..b6e5fd23cc5a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -307,7 +307,6 @@ void free_pgd_range(struct mmu_gather *tlb,
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long start;
 
 	/*
 	 * The next few lines have given us lots of grief...
@@ -351,7 +350,6 @@ void free_pgd_range(struct mmu_gather *tlb,
 	if (addr > end - 1)
 		return;
 
-	start = addr;
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -2008,11 +2006,10 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long start = addr, end = addr + size;
+	unsigned long end = addr + size;
 	int err;
 
 	BUG_ON(addr >= end);
-	mmu_notifier_invalidate_range_start(mm, start, end);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -2020,7 +2017,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	mmu_notifier_invalidate_range_end(mm, start, end);
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
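With the two notifier calls dropped from apply_to_page_range() itself, flushing secondary MMUs becomes the caller's responsibility. A minimal caller sketch under that assumption; remap_with_notifiers() and my_pte_fn() are made-up names, while the pte_fn_t and notifier signatures follow this kernel generation, where the notifiers still took (mm, start, end):

/*
 * Hypothetical caller: wrap apply_to_page_range() in the notifier
 * pair when the callback rewrites live user mappings.
 */
static int my_pte_fn(pte_t *pte, pgtable_t token,
		     unsigned long addr, void *data)
{
	/* inspect or modify *pte here */
	return 0;
}

static int remap_with_notifiers(struct mm_struct *mm,
				unsigned long addr, unsigned long size)
{
	int err;

	mmu_notifier_invalidate_range_start(mm, addr, addr + size);
	err = apply_to_page_range(mm, addr, size, my_pte_fn, NULL);
	mmu_notifier_invalidate_range_end(mm, addr, addr + size);
	return err;
}

Callers that only populate previously empty ptes (the common ioremap-style use) need no such wrapper, which is presumably why the calls could move out of the core helper.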
@@ -2630,6 +2627,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	swp_entry_t entry;
 	pte_t pte;
 	struct mem_cgroup *ptr = NULL;
+	int exclusive = 0;
 	int ret = 0;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2724,10 +2722,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		flags &= ~FAULT_FLAG_WRITE;
+		ret |= VM_FAULT_WRITE;
+		exclusive = 1;
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
-	page_add_anon_rmap(page, vma, address);
+	do_page_add_anon_rmap(page, vma, address, exclusive);
 	/* It's better to call commit-charge after rmap is established */
 	mem_cgroup_commit_charge_swapin(page, ptr);
 
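The swapin path can now tell the rmap code that it holds the page exclusively (it just took write ownership via reuse_swap_page()). A plausible shape for the new entry point, sketched only from the calling convention visible above; the body is an assumption, not the actual rmap.c change:

/*
 * Sketch: the old entry point stays as a wrapper passing
 * exclusive == 0, so every existing caller keeps its behaviour,
 * while swapin can pass exclusive == 1 and let the anon_vma
 * bookkeeping treat the page as private to this vma.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}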
@@ -2760,6 +2760,26 @@ out_release:
 }
 
 /*
+ * This is like a special single-page "expand_downwards()",
+ * except we must first make sure that 'address-PAGE_SIZE'
+ * doesn't hit another vma.
+ *
+ * The "find_vma()" will do the right thing even if we wrap
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+	address &= PAGE_MASK;
+	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+		address -= PAGE_SIZE;
+		if (find_vma(vma->vm_mm, address) != vma)
+			return -ENOMEM;
+
+		expand_stack(vma, address);
+	}
+	return 0;
+}
+
+/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
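The effect of check_stack_guard_page() is visible from user space: if some other mapping sits immediately below a grows-down vma, faulting in that vma's lowest page now fails instead of silently leaving no guard room. A demonstration sketch, written against kernels of this vintage (later kernels moved the guard gap elsewhere, so the exact failure mode can differ):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);

	/* A grows-down region, like a thread stack. */
	char *stk = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
	if (stk == MAP_FAILED)
		return 1;

	/* Pin an unrelated vma directly below it. */
	if (mmap(stk - pg, pg, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
		return 1;

	/*
	 * Faulting the lowest page runs check_stack_guard_page():
	 * find_vma() on (address - PAGE_SIZE) returns the pinned vma,
	 * not stk's, so the fault gets VM_FAULT_SIGBUS and this write
	 * should kill the process.
	 */
	stk[0] = 1;

	printf("survived: no guard-page check on this kernel\n");
	return 0;
}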
@@ -2772,19 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
+	pte_unmap(page_table);
+
+	/* Check if we need to add a guard page to the stack */
+	if (check_stack_guard_page(vma, address) < 0)
+		return VM_FAULT_SIGBUS;
+
+	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
 		goto setpte;
 	}
 
 	/* Allocate our own private page. */
-	pte_unmap(page_table);
-
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);
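A note on the locking change in this hunk: page_table is now unmapped on entry (check_stack_guard_page() can sleep inside expand_stack(), which is illegal with a kmapped pte), so the zero-page path must re-map the pte while taking its lock. pte_offset_map_lock() does both; roughly, as a sketch of the generic helper rather than the exact macro text:

/*
 * Approximately what page_table = pte_offset_map_lock(mm, pmd,
 * address, &ptl) does: find the page-table lock, map the pte page,
 * hand the lock pointer back to the caller, then take the lock.
 */
spinlock_t *__ptl = pte_lockptr(mm, pmd);
pte_t *__pte = pte_offset_map(pmd, address);
ptl = __ptl;
spin_lock(__ptl);
page_table = __pte;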