author	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-06 16:54:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-06 17:05:17 -0400
commit	4f74d2c8e827af12596f153a564c868bf6dbe3dd (patch)
tree	6ef2bafd6c23a4c4a9ef716ea530daea824a7721 /mm/memory.c
parent	7e027b14d53e9729f823ba8652095d1e309aa8e9 (diff)
vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces
The VM accounting makes no sense at this level, and half of the callers didn't
ever actually use the end result.  The only time we want to unaccount the memory
is when we actually remove the vma, so do the accounting at that point instead.

This simplifies the interfaces (no need to pass down that silly page counter to
functions that really don't care), and also makes it much more obvious what is
actually going on: we do vm_[un]acct_memory() when adding or removing the vma,
not on random page walking.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
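For readers following only this mm/memory.c view: the counting does not go away, it moves next to the vma teardown itself. Below is a minimal sketch of that "account at removal" pattern, assuming the era's helpers vma_pages(), remove_vma() and vm_unacct_memory(); the real call sites are in the companion mm/mmap.c changes, which this diffstat-limited page does not show.

	/*
	 * Hypothetical sketch only: count VM_ACCOUNT pages while the
	 * vmas are actually being removed, then unaccount them once,
	 * instead of threading an nr_accounted pointer through
	 * unmap_vmas()/unmap_single_vma().
	 */
	unsigned long nr_accounted = 0;

	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);	/* returns the next vma */
	}
	vm_unacct_memory(nr_accounted);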
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index f7b6c9859796..1e77da6d82c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,7 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  *
  * Unmap all pages in the vma list.
  *
@@ -1354,13 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, NULL);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1379,14 +1375,13 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = start + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-		unmap_single_vma(&tlb, vma, start, end, &nr_accounted, details);
+		unmap_single_vma(&tlb, vma, start, end, details);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
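With the counter gone, a caller tearing down mappings passes only the range. A hedged usage example of the simplified signature (the surrounding mmu_gather setup and the actual exit-path call site in mm/mmap.c are assumed, not shown on this page):

	/*
	 * Example only: unmap every vma in the mm (the 0..-1 range covers
	 * the whole address space); there is no nr_accounted argument any
	 * more, since unaccounting now happens when each vma is removed.
	 */
	unmap_vmas(&tlb, vma, 0, -1);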