author	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-06 16:54:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-06 17:05:17 -0400
commit	4f74d2c8e827af12596f153a564c868bf6dbe3dd (patch)
tree	6ef2bafd6c23a4c4a9ef716ea530daea824a7721 /mm/mmap.c
parent	7e027b14d53e9729f823ba8652095d1e309aa8e9 (diff)
vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces
The VM accounting makes no sense at this level, and half of the callers
didn't ever actually use the end result.  The only time we want to
unaccount the memory is when we actually remove the vma, so do the
accounting at that point instead.

This simplifies the interfaces (no need to pass down that silly page
counter to functions that really don't care), and also makes it much more
obvious what is actually going on: we do vm_[un]acct_memory() when adding
or removing the vma, not on random page walking.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
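The interface change is visible from the call sites in the diff below: unmap_vmas() loses its nr_accounted output parameter. A rough before/after sketch of the prototype follows; the parameter names are assumed, since the real declaration lives in include/linux/mm.h, which this diffstat does not cover.

/* Before this patch (sketch): unmap_vmas() handed the accountable page
 * count back to the caller, which then called vm_unacct_memory() itself. */
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start_addr, unsigned long end_addr,
		unsigned long *nr_accounted);

/* After (sketch): accounting is no longer threaded through the
 * page-unmapping path at all. */
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start_addr, unsigned long end_addr);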
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 58806106fab6..69a1889f3790 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
@@ -2315,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }