author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-06 16:54:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-06 17:05:17 -0400
commit     4f74d2c8e827af12596f153a564c868bf6dbe3dd (patch)
tree       6ef2bafd6c23a4c4a9ef716ea530daea824a7721 /mm
parent     7e027b14d53e9729f823ba8652095d1e309aa8e9 (diff)
vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces
The VM accounting makes no sense at this level, and half of the callers
didn't ever actually use the end result.  The only time we want to
unaccount the memory is when we actually remove the vma, so do the
accounting at that point instead.

This simplifies the interfaces (no need to pass down that silly page
counter to functions that really don't care), and also makes it much
more obvious what is actually going on: we do vm_[un]acct_memory() when
adding or removing the vma, not on random page walking.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
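To make the pattern concrete, here is a minimal, userspace-compilable C
sketch of the scheme the patch moves to: count VM_ACCOUNT pages while
tearing down the vma list, then unaccount once at the end.  The struct
vma, remove_vma() and vm_unacct_memory() below are simplified stand-ins
for the kernel definitions, not the real ones.

	/*
	 * Simplified illustration of accounting at vma-removal time.
	 * All types and helpers here are stand-ins, not kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define VM_ACCOUNT 0x1UL
	#define PAGE_SHIFT 12

	struct vma {
		unsigned long vm_start, vm_end, vm_flags;
		struct vma *vm_next;
	};

	static unsigned long vma_pages(struct vma *vma)
	{
		return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	}

	/* Stand-in for vm_unacct_memory(): just report the total. */
	static void vm_unacct_memory(unsigned long pages)
	{
		printf("unaccounting %lu pages\n", pages);
	}

	/* Stand-in for remove_vma(): free one vma, return the next. */
	static struct vma *remove_vma(struct vma *vma)
	{
		struct vma *next = vma->vm_next;
		free(vma);
		return next;
	}

	/* The shape remove_vma_list()/exit_mmap() take after this patch:
	 * accumulate while removing, then one vm_unacct_memory() call. */
	static void remove_vma_list(struct vma *vma)
	{
		unsigned long nr_accounted = 0;

		while (vma) {
			if (vma->vm_flags & VM_ACCOUNT)
				nr_accounted += vma_pages(vma);
			vma = remove_vma(vma);
		}
		vm_unacct_memory(nr_accounted);
	}

	int main(void)
	{
		struct vma *a = malloc(sizeof(*a));
		struct vma *b = malloc(sizeof(*b));

		*b = (struct vma){ 0x8000, 0xa000, 0, NULL };       /* not accounted */
		*a = (struct vma){ 0x1000, 0x5000, VM_ACCOUNT, b }; /* 4 pages */
		remove_vma_list(a);                                 /* prints 4 */
		return 0;
	}

After the patch, remove_vma_list() and exit_mmap() follow exactly this
shape, while the unmap paths no longer thread an nr_accounted pointer
through functions that never cared about it.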
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	16
-rw-r--r--	mm/mmap.c	18
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index f7b6c9859796..1e77da6d82c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,7 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  *
  * Unmap all pages in the vma list.
  *
@@ -1354,13 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, NULL);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1379,14 +1375,13 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = start + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-		unmap_single_vma(&tlb, vma, start, end, &nr_accounted, details);
+		unmap_single_vma(&tlb, vma, start, end, details);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 58806106fab6..69a1889f3790 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
@@ -2315,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }