-rw-r--r--	fs/exec.c	4
-rw-r--r--	include/asm-generic/pgtable.h	10
-rw-r--r--	mm/mmap.c	4
3 files changed, 14 insertions, 4 deletions
diff --git a/fs/exec.c b/fs/exec.c
index a96a4885bbbf..87e731f020fb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * when the old and new regions overlap clear from new_end.
 		 */
 		free_pgd_range(&tlb, new_end, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : 0);
+			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	} else {
 		/*
 		 * otherwise, clean from old_start; this is done to not touch
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * for the others its just a little faster.
 		 */
 		free_pgd_range(&tlb, old_start, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : 0);
+			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
 	tlb_finish_mmu(&tlb, new_end, old_end);
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfd87685fc1f..a59ff51b0166 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -7,6 +7,16 @@
 #include <linux/mm_types.h>
 #include <linux/bug.h>
 
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING	0UL
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,
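
The generic default of 0UL leaves the ceiling effectively unlimited. An architecture whose kernel page tables sit directly above the user address range can override the macro from its own asm/pgtable.h, so the fallback above is never used. A minimal sketch of such an override; the file path and the choice of TASK_SIZE are illustrative assumptions, not part of this patch:

	/* arch/<arch>/include/asm/pgtable.h: hypothetical override */
	/*
	 * Keep free_pgtables()/free_pgd_range() below the top of user space,
	 * so page-table pages shared with the kernel are never torn down.
	 */
	#define USER_PGTABLES_CEILING	TASK_SIZE
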
diff --git a/mm/mmap.c b/mm/mmap.c
index b2c363f7ae54..288958f05f1c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2302,7 +2302,7 @@ static void unmap_region(struct mm_struct *mm,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-				 next ? next->vm_start : 0);
+				 next ? next->vm_start : USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, start, end);
 }
 
@@ -2682,7 +2682,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
 	/*
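
For context on why a bare 0 ever worked as a ceiling: the page-table freeing helpers treat a zero ceiling as "no upper limit", because the bound check is skipped and an unsigned ceiling - 1 wraps to ULONG_MAX. A simplified, illustrative sketch of that clamp logic, assuming the floor/ceiling convention of free_pgd_range() in mm/memory.c; this is a paraphrase for illustration, not a verbatim copy, and the helper name is hypothetical:

	/*
	 * May the page-table page covering [start, end) be freed, given the
	 * caller's floor and ceiling? PMD_MASK stands in for the mask of the
	 * next-higher table level, as in the kernel's own helpers.
	 */
	static bool pgtable_within_bounds(unsigned long start, unsigned long end,
					  unsigned long floor, unsigned long ceiling)
	{
		start &= PMD_MASK;		/* round down to the covering table */
		if (start < floor)
			return false;		/* would cut below the previous VMA */
		if (ceiling) {
			ceiling &= PMD_MASK;	/* round the limit down as well */
			if (!ceiling)
				return false;
		}
		if (end - 1 > ceiling - 1)	/* ceiling == 0 wraps: no upper limit */
			return false;
		return true;
	}

With USER_PGTABLES_CEILING defined to a real boundary, the topmost user page table is left in place whenever it extends past that boundary; with the hard-coded 0 it could be freed even if it also mapped addresses above the user range, which is the case this patch guards against.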