author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-21 11:37:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-21 11:37:07 -0400
commit     dddbd5414bee86d14bbc835163ca72e4e457c80a (patch)
tree       97f2fc5fcfb543ffc1fa7d301f2e41a37e5e12f2 /mm
parent     76e10d158efb6d4516018846f60c2ab5501900bc (diff)
parent     4f74d2c8e827af12596f153a564c868bf6dbe3dd (diff)
Merge branch 'vm-cleanups' (unmap_vma() interface cleanup)
This series sanitizes the interface to unmap_vma().  The crazy interface
annoyed me no end when I was looking at unmap_single_vma(), which we can
spend quite a lot of time in (especially with loads that have a lot of
small fork/exec's: shell scripts etc).

Moving the nr_accounted calculations to where they belong at least
clarifies things a little.  I hope to come back to look at the performance
of this later, but if/when I get back to it I at least don't have to see
the crazy interfaces any more.

* vm-cleanups:
  vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces
  vm: simplify unmap_vmas() calling convention
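For quick reference, the calling-convention change in the diff below amounts to dropping the nr_accounted out-parameter (and, for unmap_vmas(), the zap_details argument) from the unmap path:

    /* before */
    void unmap_vmas(struct mmu_gather *tlb,
            struct vm_area_struct *vma, unsigned long start_addr,
            unsigned long end_addr, unsigned long *nr_accounted,
            struct zap_details *details);

    /* after */
    void unmap_vmas(struct mmu_gather *tlb,
            struct vm_area_struct *vma, unsigned long start_addr,
            unsigned long end_addr);

zap_page_range() now open-codes its own per-VMA loop so it can keep passing details to unmap_single_vma(), and the callers that need VM_ACCOUNT bookkeeping (remove_vma_list() on the munmap path and exit_mmap()) count accountable pages themselves while walking the VMA list, settling up with a single vm_unacct_memory() call.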
Diffstat (limited to 'mm')
-rw-r--r--   mm/memory.c   28
-rw-r--r--   mm/mmap.c     18
2 files changed, 23 insertions, 23 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa86..1e77da6d82c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr, unsigned long *nr_accounted,
+               unsigned long end_addr,
                struct zap_details *details)
 {
        unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
        if (end <= vma->vm_start)
                return;
 
-       if (vma->vm_flags & VM_ACCOUNT)
-               *nr_accounted += (end - start) >> PAGE_SHIFT;
-
        if (unlikely(is_pfn_mapping(vma)))
                untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,8 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr, unsigned long *nr_accounted,
-               struct zap_details *details)
+               unsigned long end_addr)
 {
        struct mm_struct *mm = vma->vm_mm;
 
        mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-               unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-                                details);
+               unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
        mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1369,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long size, struct zap_details *details)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;
-       unsigned long end = address + size;
-       unsigned long nr_accounted = 0;
+       unsigned long end = start + size;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
-       unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-       tlb_finish_mmu(&tlb, address, end);
+       mmu_notifier_invalidate_range_start(mm, start, end);
+       for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+               unmap_single_vma(&tlb, vma, start, end, details);
+       mmu_notifier_invalidate_range_end(mm, start, end);
+       tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;
        unsigned long end = address + size;
-       unsigned long nr_accounted = 0;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, address, end);
-       unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+       unmap_single_vma(&tlb, vma, address, end, details);
        mmu_notifier_invalidate_range_end(mm, address, end);
        tlb_finish_mmu(&tlb, address, end);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52d9603..69a1889f3790 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+       unsigned long nr_accounted = 0;
+
        /* Update high watermark before we lower total_vm */
        update_hiwater_vm(mm);
        do {
                long nrpages = vma_pages(vma);
 
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += nrpages;
                mm->total_vm -= nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
+       vm_unacct_memory(nr_accounted);
        validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
        struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
        struct mmu_gather tlb;
-       unsigned long nr_accounted = 0;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
-       unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
+       unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                                 next ? next->vm_start : 0);
        tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
        tlb_gather_mmu(&tlb, mm, 1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
-       unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
+       unmap_vmas(&tlb, vma, 0, -1);
 
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(&tlb, 0, -1);
@@ -2315,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
         * Walk the list again, actually closing and freeing it,
         * with preemption enabled, without holding any MM locks.
         */
-       while (vma)
+       while (vma) {
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += vma_pages(vma);
                vma = remove_vma(vma);
+       }
+       vm_unacct_memory(nr_accounted);
 
        BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
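Outside the kernel context, a minimal user-space sketch of the caller-side pattern the series moves to might look as follows; the toy_* names, the flag value and the page-shift constant are invented for illustration and are not kernel APIs:

    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_PAGE_SHIFT 12          /* 4 KiB pages, purely illustrative */
    #define TOY_VM_ACCOUNT 0x1UL       /* stand-in for the real VM_ACCOUNT flag */

    /* Toy VMA: just enough state to show the accounting walk. */
    struct toy_vma {
            unsigned long vm_start, vm_end, vm_flags;
            struct toy_vma *vm_next;
    };

    static unsigned long toy_vma_pages(const struct toy_vma *vma)
    {
            return (vma->vm_end - vma->vm_start) >> TOY_PAGE_SHIFT;
    }

    /* Stand-in for vm_unacct_memory(): release the commit charge in one call. */
    static void toy_unacct_memory(unsigned long nr_pages)
    {
            printf("unaccounting %lu pages\n", nr_pages);
    }

    /* Caller-side pattern: count accountable pages while tearing down the
     * list, then unaccount once, instead of threading an out-parameter
     * through the unmap helpers. */
    static void toy_remove_vma_list(struct toy_vma *vma)
    {
            unsigned long nr_accounted = 0;

            while (vma) {
                    struct toy_vma *next = vma->vm_next;

                    if (vma->vm_flags & TOY_VM_ACCOUNT)
                            nr_accounted += toy_vma_pages(vma);
                    free(vma);
                    vma = next;
            }
            toy_unacct_memory(nr_accounted);
    }

    int main(void)
    {
            /* Two made-up mappings: one accountable (3 pages), one not. */
            struct toy_vma *b = calloc(1, sizeof(*b));
            struct toy_vma *a = calloc(1, sizeof(*a));

            b->vm_start = 0x20000; b->vm_end = 0x24000; b->vm_flags = 0;
            a->vm_start = 0x10000; a->vm_end = 0x13000; a->vm_flags = TOY_VM_ACCOUNT;
            a->vm_next = b;

            toy_remove_vma_list(a); /* prints: unaccounting 3 pages */
            return 0;
    }

The point of this shape is that the accounting is computed where the VMA list is already being walked and destroyed, so the unmap helpers no longer have to carry an output parameter through every level.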