about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2012-03-05 13:41:15 -0500
committerAl Viro <viro@zeniv.linux.org.uk>2012-03-20 21:39:51 -0400
commit6e8bb0193af3f308ef22817a5560422d33e58b90 (patch)
tree6001421c8d389bd00b18e0510e3f6c9130f9f80b
parent853f5e264018113b1f96f05551b07a74b836c7fc (diff)
VM: make unmap_vmas() return void
same story - nobody uses it and it's been pointless since "mm: Remove i_mmap_lock lockbreak" went in. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--include/linux/mm.h2
-rw-r--r--mm/memory.c6
-rw-r--r--mm/mmap.c3
3 files changed, 3 insertions, 8 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6c65d24852e5..b5bb54d6d667 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -895,7 +895,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
diff --git a/mm/memory.c b/mm/memory.c
index cfb57b007a6c..016c67587ef4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1316,8 +1316,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the end address of the unmapping (restart addr if interrupted).
- *
  * Unmap all pages in the vma list.
  *
  * Only addresses between `start' and `end' will be unmapped.
@@ -1329,7 +1327,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -1372,11 +1370,9 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
 		} else
 			unmap_page_range(tlb, vma, start, end, details);
 		}
-		start = end;
 	}

 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
-	return start;	/* which is now the end (or restart) address */
 }

 /**
diff --git a/mm/mmap.c b/mm/mmap.c
index 2b2b45eb816c..9365a8fe3701 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2224,7 +2224,6 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
-	unsigned long end;

 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
@@ -2249,7 +2248,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);

 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);