aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 include/linux/mm.h |  3 +--
 mm/memory.c        | 18 +++++++++---------
 mm/mmap.c          |  4 ++--
 3 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 74aa71bea1e4..0aeded3a2f7c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -898,8 +898,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *);
+		unsigned long end_addr, unsigned long *nr_accounted);
 
 /**
  * mm_walk - callbacks for walk_page_range
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa86..f7b6c9859796 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1340,7 +1340,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1354,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr, unsigned long *nr_accounted)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1373,22 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
+	unsigned long end = start + size;
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, &nr_accounted, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52d9603..58806106fab6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1917,7 +1917,7 @@ static void unmap_region(struct mm_struct *mm,
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, start, end, &nr_accounted);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 			next ? next->vm_start : 0);
@@ -2305,7 +2305,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);