 arch/arm/include/asm/tlb.h   |  7 +++++--
 arch/arm64/include/asm/tlb.h |  7 +++++--
 arch/ia64/include/asm/tlb.h  |  9 ++++++---
 arch/s390/include/asm/tlb.h  |  8 ++++++--
 arch/sh/include/asm/tlb.h    |  6 ++++--
 arch/um/include/asm/tlb.h    |  6 ++++--
 fs/exec.c                    |  4 ++--
 include/asm-generic/tlb.h    |  2 +-
 mm/hugetlb.c                 |  2 +-
 mm/memory.c                  | 36 +++++++++++++++++++++---------------
 mm/mmap.c                    |  4 ++--
 11 files changed, 57 insertions(+), 34 deletions(-)
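Taken together, these hunks change tlb_gather_mmu() to take the (start, end) address range being torn down instead of a full-mm flag. A full-mm teardown is now encoded as the range 0..-1, and each implementation recovers the old flag with the test !(start | (end+1)). A minimal standalone sketch of that encoding (the helper name is hypothetical; only the expression is from the patch):

#include <assert.h>

/* Sketch of the fullmm encoding used throughout this patch:
 * a full-mm teardown passes start=0, end=-1 (i.e. ~0UL), and
 * !(start | (end + 1)) is 1 for exactly that pair of values.
 */
static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	assert(is_fullmm(0, -1UL));          /* exit_mmap(): whole address space */
	assert(!is_fullmm(0x1000, 0x2000));  /* a ranged unmap is not fullmm */
	assert(!is_fullmm(0, 0x2000));       /* even a range starting at 0 */
	return 0;
}

Deriving fullmm from the range keeps the old full-mm fast paths (such as s390's immediate __tlb_flush_mm() below) intact, while ranged callers now describe exactly what they unmap.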
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
  * {
  *   for each vma that needs a shootdown do {
  *     tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
+	unsigned long		start, end;
 	unsigned long		start_addr;
 	unsigned long		end_addr;
 	struct page		**pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->start_addr = ~0UL;
 }
 
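The template comment updated in this header is the contract every caller of the mmu_gather API follows. Under the new signature, a ranged unmap looks roughly like the sketch below (a simplified illustration built on the generic mmu_gather API; real callers such as zap_page_range() also take locks and issue mmu_notifier calls, omitted here):

/* Sketch of the caller protocol from the template comment above,
 * under the new (start, end) signature.
 */
static void unmap_range_sketch(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* ranged: fullmm == 0 */
	unmap_vmas(&tlb, vma, start, end);	/* gather pages; may flush */
	tlb_finish_mmu(&tlb, start, end);	/* final flush + free batches */
}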
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..23a64d25f2b1 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	struct mmu_table_batch *batch;
 	unsigned int fullmm;
+	unsigned long start, end;
 };
 
 struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
-				  unsigned int full_mm_flush)
+				  unsigned long start,
+				  unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 	tlb->batch = NULL;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 
 	init_tlb_gather(tlb);
 }
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 
 	init_tlb_gather(tlb);
 }
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		return -ENOMEM;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, old_start, old_end);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		free_pgd_range(&tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
-	tlb_finish_mmu(&tlb, new_end, old_end);
+	tlb_finish_mmu(&tlb, old_start, old_end);
 
 	/*
 	 * Shrink the vma to just the new range. Always succeeds.
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a41..5672d7ea1fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 		    unsigned long end);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb, start, end);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 40268410732a..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  * tear-down from @mm. The @fullmm argument is used when @mm is without
  * users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start = -1UL;
-	tlb->end = 0;
+	tlb->start = start;
+	tlb->end = end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
-	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1205,17 +1203,25 @@ again:
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = range_start;
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
 		tlb->end = addr;
-#endif
+
 		tlb_flush_mmu(tlb);
-		if (addr != end) {
-			range_start = addr;
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
+		if (addr != end)
 			goto again;
-		}
 	}
 
 	return addr;
@@ -1400,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1426,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
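The force_flush rework in zap_pte_range() above is the heart of the change: when the gather batch fills up mid-walk, only the segment already walked is flushed, and the pending range is then restored before retrying. The same save/restore pattern in isolation (hypothetical stand-in types; not the kernel's mmu_gather machinery):

/* Sketch of the segment-wise flush in the zap_pte_range() hunk above:
 * flush only [tlb->start, addr), then make [addr, old_end) the pending
 * range before the retry loop continues.
 */
struct gather { unsigned long start, end; };

static void flush_range(struct gather *tlb)
{
	/* stand-in for tlb_flush_mmu(): invalidate [tlb->start, tlb->end) */
}

static void flush_segment_and_continue(struct gather *tlb, unsigned long addr)
{
	unsigned long old_end = tlb->end;

	tlb->end = addr;	/* clip to the segment already walked */
	flush_range(tlb);

	tlb->start = addr;	/* the remainder becomes the new window */
	tlb->end = old_end;
}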
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 1);
+	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
