author     Hugh Dickins <hugh@veritas.com>        2005-10-29 21:16:05 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:38 -0400
commit     4294621f41a85497019fae64341aa5351a1921b7
tree       fdeb7eb44384a99d0679ffa6de5019bab0ea2166
parent     404351e67a9facb475abf1492245374a28d13e90
[PATCH] mm: rss = file_rss + anon_rss
I was lazy when we added anon_rss, and chose to change as few places as
possible.  So currently each anonymous page has to be counted twice, in rss
and in anon_rss.  Which won't be so good if those are atomic counts in some
configurations.

Change that around: keep file_rss and anon_rss separately, and add them
together (with get_mm_rss macro) when the total is needed - reading two
atomics is much cheaper than updating two atomics.  And update anon_rss
upfront, typically in memory.c, not tucked away in page_add_anon_rmap.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
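For reference, the get_mm_rss macro the message refers to is introduced outside mm/ (the diffstat below is limited to mm/), so its hunk does not appear here. A minimal sketch of such a helper, assuming the existing per-counter get_mm_counter() accessors and the new file_rss/anon_rss fields, looks like:

        /*
         * Sketch only: the real definition lands alongside the other mm
         * counter helpers (include/linux/sched.h at this point in history)
         * and is not part of this mm/-only diffstat.  Total RSS is computed
         * on demand from the two counters, so the fault and unmap hot paths
         * each update only one counter per page.
         */
        #define get_mm_rss(mm) \
                (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))

Callers that previously read get_mm_counter(mm, rss), such as update_mem_hiwater() in the memory.c and nommu.c hunks below, now read this sum once instead.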
Diffstat (limited to 'mm')
-rw-r--r--  mm/fremap.c    4
-rw-r--r--  mm/hugetlb.c   6
-rw-r--r--  mm/memory.c   31
-rw-r--r--  mm/nommu.c     2
-rw-r--r--  mm/rmap.c      8
-rw-r--r--  mm/swapfile.c  2
6 files changed, 27 insertions, 26 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index ab23a0673c3..fd7f2a17ff3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -39,7 +39,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                                         set_page_dirty(page);
                                 page_remove_rmap(page);
                                 page_cache_release(page);
-                                dec_mm_counter(mm, rss);
+                                dec_mm_counter(mm, file_rss);
                         }
                 }
         } else {
@@ -95,7 +95,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
         zap_pte(mm, vma, addr, pte);
 
-        inc_mm_counter(mm,rss);
+        inc_mm_counter(mm, file_rss);
         flush_icache_page(vma, page);
         set_pte_at(mm, addr, pte, mk_pte(page, prot));
         page_add_file_rmap(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 61d38067803..094455bcbbf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -286,7 +286,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                         entry = *src_pte;
                         ptepage = pte_page(entry);
                         get_page(ptepage);
-                        add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+                        add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                         set_huge_pte_at(dst, addr, dst_pte, entry);
                 }
                 spin_unlock(&src->page_table_lock);
@@ -324,7 +324,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
                 page = pte_page(pte);
                 put_page(page);
-                add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE));
+                add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
         }
         flush_tlb_range(vma, start, end);
 }
@@ -386,7 +386,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                                 goto out;
                         }
                 }
-                add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
+                add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
                 set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page));
         }
 out:
diff --git a/mm/memory.c b/mm/memory.c
index 51eb3857483..59d42e50fa5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -397,9 +397,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                 pte = pte_mkclean(pte);
         pte = pte_mkold(pte);
         get_page(page);
-        inc_mm_counter(dst_mm, rss);
         if (PageAnon(page))
                 inc_mm_counter(dst_mm, anon_rss);
+        else
+                inc_mm_counter(dst_mm, file_rss);
         set_pte_at(dst_mm, addr, dst_pte, pte);
         page_dup_rmap(page);
 }
@@ -581,8 +582,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                         set_page_dirty(page);
                                 if (pte_young(ptent))
                                         mark_page_accessed(page);
+                                dec_mm_counter(tlb->mm, file_rss);
                         }
-                        dec_mm_counter(tlb->mm, rss);
                         page_remove_rmap(page);
                         tlb_remove_page(tlb, page);
                         continue;
@@ -1290,13 +1291,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         spin_lock(&mm->page_table_lock);
         page_table = pte_offset_map(pmd, address);
         if (likely(pte_same(*page_table, orig_pte))) {
-                if (PageAnon(old_page))
-                        dec_mm_counter(mm, anon_rss);
                 if (PageReserved(old_page))
-                        inc_mm_counter(mm, rss);
-                else
+                        inc_mm_counter(mm, anon_rss);
+                else {
                         page_remove_rmap(old_page);
-
+                        if (!PageAnon(old_page)) {
+                                inc_mm_counter(mm, anon_rss);
+                                dec_mm_counter(mm, file_rss);
+                        }
+                }
                 flush_cache_page(vma, address, pfn);
                 entry = mk_pte(new_page, vma->vm_page_prot);
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -1701,7 +1704,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
         /* The page isn't present yet, go ahead with the fault. */
 
-        inc_mm_counter(mm, rss);
+        inc_mm_counter(mm, anon_rss);
         pte = mk_pte(page, vma->vm_page_prot);
         if (write_access && can_share_swap_page(page)) {
                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
@@ -1774,7 +1777,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         page_cache_release(page);
                         goto unlock;
                 }
-                inc_mm_counter(mm, rss);
+                inc_mm_counter(mm, anon_rss);
                 entry = mk_pte(page, vma->vm_page_prot);
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                 lru_cache_add_active(page);
@@ -1887,19 +1890,19 @@ retry:
          */
         /* Only go through if we didn't race with anybody else... */
         if (pte_none(*page_table)) {
-                if (!PageReserved(new_page))
-                        inc_mm_counter(mm, rss);
-
                 flush_icache_page(vma, new_page);
                 entry = mk_pte(new_page, vma->vm_page_prot);
                 if (write_access)
                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                 set_pte_at(mm, address, page_table, entry);
                 if (anon) {
+                        inc_mm_counter(mm, anon_rss);
                         lru_cache_add_active(new_page);
                         page_add_anon_rmap(new_page, vma, address);
-                } else
+                } else if (!PageReserved(new_page)) {
+                        inc_mm_counter(mm, file_rss);
                         page_add_file_rmap(new_page);
+                }
         } else {
                 /* One of our sibling threads was faster, back out. */
                 page_cache_release(new_page);
@@ -2192,7 +2195,7 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 void update_mem_hiwater(struct task_struct *tsk)
 {
         if (tsk->mm) {
-                unsigned long rss = get_mm_counter(tsk->mm, rss);
+                unsigned long rss = get_mm_rss(tsk->mm);
 
                 if (tsk->mm->hiwater_rss < rss)
                         tsk->mm->hiwater_rss = rss;
diff --git a/mm/nommu.c b/mm/nommu.c
index 0ef241ae376..599924886eb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1083,7 +1083,7 @@ void update_mem_hiwater(struct task_struct *tsk)
         unsigned long rss;
 
         if (likely(tsk->mm)) {
-                rss = get_mm_counter(tsk->mm, rss);
+                rss = get_mm_rss(tsk->mm);
                 if (tsk->mm->hiwater_rss < rss)
                         tsk->mm->hiwater_rss = rss;
                 if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
diff --git a/mm/rmap.c b/mm/rmap.c
index 1fc559e09ca..504757624cc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -445,8 +445,6 @@ void page_add_anon_rmap(struct page *page,
 {
         BUG_ON(PageReserved(page));
 
-        inc_mm_counter(vma->vm_mm, anon_rss);
-
         if (atomic_inc_and_test(&page->_mapcount)) {
                 struct anon_vma *anon_vma = vma->anon_vma;
 
@@ -561,9 +559,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
                 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                 BUG_ON(pte_file(*pte));
                 dec_mm_counter(mm, anon_rss);
-        }
+        } else
+                dec_mm_counter(mm, file_rss);
 
-        dec_mm_counter(mm, rss);
         page_remove_rmap(page);
         page_cache_release(page);
 
@@ -667,7 +665,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 
                 page_remove_rmap(page);
                 page_cache_release(page);
-                dec_mm_counter(mm, rss);
+                dec_mm_counter(mm, file_rss);
                 (*mapcount)--;
         }
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 05c85129124..296e0bbf783 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -407,7 +407,7 @@ void free_swap_and_cache(swp_entry_t entry)
 static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
                 unsigned long addr, swp_entry_t entry, struct page *page)
 {
-        inc_mm_counter(vma->vm_mm, rss);
+        inc_mm_counter(vma->vm_mm, anon_rss);
         get_page(page);
         set_pte_at(vma->vm_mm, addr, pte,
                 pte_mkold(mk_pte(page, vma->vm_page_prot)));