Diffstat (limited to 'mm')
 mm/hugetlb.c |  2 +-
 mm/memory.c  | 36 +++++++++++++++++++++++++---------------
 mm/mmap.c    |  4 ++--
 3 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5cf99bf8cce2..7c5eb85ec645 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb, start, end);
 }
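
For context (my reading of the hunks, not wording from the commit itself): every tlb_gather_mmu() caller stops passing a bare fullmm flag and instead hands over the virtual range actually being torn down, so the mmu_gather carries an accurate flush range for its whole lifetime rather than learning it only at tlb_finish_mmu() time. The calling convention, as the hunks below use it:

	tlb_gather_mmu(&tlb, mm, start, end);	/* partial unmap: the real bounds */
	tlb_gather_mmu(&tlb, mm, 0, -1);	/* whole address space, as in exit_mmap() */
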
diff --git a/mm/memory.c b/mm/memory.c
index 5e5080005bc4..5a35443c01ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  *	tear-down from @mm. The @fullmm argument is used when @mm is without
  *	users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm     = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm     = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start	= -1UL;
-	tlb->end	= 0;
+	tlb->start	= start;
+	tlb->end	= end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
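
The !(start | (end+1)) test deserves a note: it folds the two arguments into a single fullmm check that fires only for the (0, -1) whole-address-space encoding, since end == ~0UL makes end+1 wrap to zero. A minimal standalone sketch (my illustration, not kernel code):

	#include <assert.h>

	/* fullmm iff start == 0 and end == ~0UL: start | (end + 1) is then 0 | 0 */
	static int is_fullmm(unsigned long start, unsigned long end)
	{
		return !(start | (end + 1));
	}

	int main(void)
	{
		assert(is_fullmm(0, -1UL) == 1);	/* exit_mmap() case */
		assert(is_fullmm(0x1000, 0x8000) == 0);	/* ordinary partial unmap */
		assert(is_fullmm(0, 0x8000) == 0);	/* starts at 0 but bounded */
		return 0;
	}
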
@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end   = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
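
Why the two assignments go away (my inference from the surrounding hunks): tlb->start and tlb->end are now seeded in tlb_gather_mmu() and maintained during the walk, so letting tlb_finish_mmu() overwrite them from its own arguments would discard the range the gather has been tracking right before the final flush.
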
@@ -1101,7 +1100,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
-	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1204,17 +1202,25 @@ again:
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = range_start;
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
 		tlb->end = addr;
-#endif
+
 		tlb_flush_mmu(tlb);
-		if (addr != end) {
-			range_start = addr;
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
+		if (addr != end)
 			goto again;
-		}
 	}
 
 	return addr;
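
The force_flush rewrite above is a split-range flush: when the walk has to stop early (the page batch is full), only the segment already walked is invalidated, and the gather is re-armed for the remainder before jumping back to the again: label. A simplified sketch of the pattern, using a hypothetical stand-in type rather than the kernel's struct mmu_gather:

	struct gather { unsigned long start, end; };

	/* Flush [tlb->start, addr), then leave the gather covering [addr, old end). */
	void flush_walked_segment(struct gather *tlb, unsigned long addr)
	{
		unsigned long old_end = tlb->end;

		tlb->end = addr;	/* clamp the flush to the walked part */
		/* ... tlb_flush_mmu(tlb) would invalidate [tlb->start, tlb->end) here ... */
		tlb->start = addr;	/* the remainder still needs flushing */
		tlb->end = old_end;
	}
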
@@ -1399,7 +1405,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1425,7 +1431,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
diff --git a/mm/mmap.c b/mm/mmap.c
index 7dbe39745be9..8d25fdc653be 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2356,7 +2356,7 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2735,7 +2735,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 1);
+	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
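
Worth spelling out (my arithmetic, not a comment from the diff): the old call tlb_gather_mmu(&tlb, mm, 1) asserted fullmm directly, and the new (0, -1) arguments reproduce it, since 0 | (-1 + 1) == 0 and therefore tlb->fullmm = !0 = 1. The same (0, -1) pair then feeds unmap_vmas(), consistent with the existing comment about unmapping all VMAs in the mm.
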