diff options
-rw-r--r--  fs/exec.c                     |  4
-rw-r--r--  include/asm-generic/pgtable.h | 10
-rw-r--r--  mm/mmap.c                     |  4
3 files changed, 14 insertions, 4 deletions
@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
613 | * when the old and new regions overlap clear from new_end. | 613 | * when the old and new regions overlap clear from new_end. |
614 | */ | 614 | */ |
615 | free_pgd_range(&tlb, new_end, old_end, new_end, | 615 | free_pgd_range(&tlb, new_end, old_end, new_end, |
616 | vma->vm_next ? vma->vm_next->vm_start : 0); | 616 | vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); |
617 | } else { | 617 | } else { |
618 | /* | 618 | /* |
619 | * otherwise, clean from old_start; this is done to not touch | 619 | * otherwise, clean from old_start; this is done to not touch |
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
622 | * for the others its just a little faster. | 622 | * for the others its just a little faster. |
623 | */ | 623 | */ |
624 | free_pgd_range(&tlb, old_start, old_end, new_end, | 624 | free_pgd_range(&tlb, old_start, old_end, new_end, |
625 | vma->vm_next ? vma->vm_next->vm_start : 0); | 625 | vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); |
626 | } | 626 | } |
627 | tlb_finish_mmu(&tlb, new_end, old_end); | 627 | tlb_finish_mmu(&tlb, new_end, old_end); |
628 | 628 | ||
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfd87685fc1f..a59ff51b0166 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -7,6 +7,16 @@
7 | #include <linux/mm_types.h> | 7 | #include <linux/mm_types.h> |
8 | #include <linux/bug.h> | 8 | #include <linux/bug.h> |
9 | 9 | ||
10 | /* | ||
11 | * On almost all architectures and configurations, 0 can be used as the | ||
12 | * upper ceiling to free_pgtables(): on many architectures it has the same | ||
13 | * effect as using TASK_SIZE. However, there is one configuration which | ||
14 | * must impose a more careful limit, to avoid freeing kernel pgtables. | ||
15 | */ | ||
16 | #ifndef USER_PGTABLES_CEILING | ||
17 | #define USER_PGTABLES_CEILING 0UL | ||
18 | #endif | ||
19 | |||
10 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 20 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
11 | extern int ptep_set_access_flags(struct vm_area_struct *vma, | 21 | extern int ptep_set_access_flags(struct vm_area_struct *vma, |
12 | unsigned long address, pte_t *ptep, | 22 | unsigned long address, pte_t *ptep, |
@@ -2302,7 +2302,7 @@ static void unmap_region(struct mm_struct *mm,
2302 | update_hiwater_rss(mm); | 2302 | update_hiwater_rss(mm); |
2303 | unmap_vmas(&tlb, vma, start, end); | 2303 | unmap_vmas(&tlb, vma, start, end); |
2304 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, | 2304 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, |
2305 | next ? next->vm_start : 0); | 2305 | next ? next->vm_start : USER_PGTABLES_CEILING); |
2306 | tlb_finish_mmu(&tlb, start, end); | 2306 | tlb_finish_mmu(&tlb, start, end); |
2307 | } | 2307 | } |
2308 | 2308 | ||
@@ -2682,7 +2682,7 @@ void exit_mmap(struct mm_struct *mm)
2682 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ | 2682 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ |
2683 | unmap_vmas(&tlb, vma, 0, -1); | 2683 | unmap_vmas(&tlb, vma, 0, -1); |
2684 | 2684 | ||
2685 | free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); | 2685 | free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); |
2686 | tlb_finish_mmu(&tlb, 0, -1); | 2686 | tlb_finish_mmu(&tlb, 0, -1); |
2687 | 2687 | ||
2688 | /* | 2688 | /* |