author     Chen, Kenneth W <kenneth.w.chen@intel.com>    2006-12-06 23:32:07 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>      2006-12-07 11:39:21 -0500
commit     cace673d376d97b0c66ffa0a49b8d588a696d5d2
tree       c85c2ad65ad10132f00b484e91c8bfcd6e3f1afc    /mm/hugetlb.c
parent     39dde65c9940c97fcd178a3d2b1c57ed8b7b68aa
[PATCH] htlb forget rss with pt sharing
Imprecise RSS accounting is an irritating ill effect of page table sharing.  After
consulting with several VM experts, I tried various methods to solve the
problem: (1) iterate through all mm_structs that share the PT and increment the
count in each; (2) keep the RSS count in the page table structure and sum those
counts up at reporting time.  Neither of these methods yields a satisfactory
implementation.
Since process RSS accounting is purely informational, I propose that we not
count hugetlb pages at all.  rlimit has such a field, though there is
absolutely no enforcement on limiting that resource.  Another option is to
account all of the RSS at hugetlb mmap time, regardless of whether the pages are
ever faulted in.  I opt for the simplicity of no accounting at all.
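For a sense of scale, the accounting this patch removes charged one huge page
as HPAGE_SIZE / PAGE_SIZE base pages against file_rss on fault and subtracted
the same amount on unmap (the add_mm_counter() calls deleted in the diff
below).  A minimal user-space illustration of that arithmetic, assuming
x86-style 2 MB huge pages over 4 KB base pages (the kernel takes the real
values from the architecture):

#include <stdio.h>

/* Stand-ins for the kernel's PAGE_SIZE and HPAGE_SIZE; assumed for
 * illustration only. */
#define BASE_PAGE_SZ  (4UL << 10)      /* 4 KB base page  */
#define HUGE_PAGE_SZ  (2UL << 20)      /* 2 MB huge page  */

int main(void)
{
        /* the delta the removed add_mm_counter(..., file_rss, ...) calls applied */
        unsigned long delta = HUGE_PAGE_SZ / BASE_PAGE_SZ;

        printf("each huge page fault added %lu pages to file_rss\n", delta);
        printf("each huge page unmap subtracted %lu pages\n", delta);
        return 0;
}

With a shared page table, a fault in one process can populate an entry that the
other sharers use as well, while only the faulting mm's counter moves; that
asymmetry is the imprecision described above.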
Hugetlb pages are special: they are reserved up front in the global reservation
pool and are not reclaimable.  From a physical memory resource point of view,
they are already consumed whether or not anyone is using them.
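That "already consumed" point is visible in /proc/meminfo: HugePages_Total
pages are carved out of general-purpose memory up front, and HugePages_Free
only says how many of them are currently idle.  A small sketch that just prints
those standard counters (nothing here is specific to this patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f) {
                perror("/proc/meminfo");
                return 1;
        }
        /* print the hugetlb pool counters: HugePages_Total, HugePages_Free, ... */
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "HugePages_", 10))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}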
If the concern is that RSS can be used to control resource allocation, we can
already specify a hugetlbfs size limit, and the sysadmin can enforce it at
mount time.  Combined with the two points above, I fail to see anything that is
adversely affected by this patch.
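For completeness, the mount-time limit referred to here is the hugetlbfs size=
mount option.  A hedged sketch of setting it from C via mount(2); the mount
point and the 128 MB cap are made-up values:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Cap this hugetlbfs instance at 128 MB of huge pages, independent of
         * any per-process RSS number.  Needs CAP_SYS_ADMIN and an existing
         * /mnt/huge directory (both illustrative).
         */
        if (mount("none", "/mnt/huge", "hugetlbfs", 0, "size=128M")) {
                perror("mount hugetlbfs");
                return 1;
        }
        return 0;
}

The equivalent administrator command is mount -t hugetlbfs -o size=128M none /mnt/huge.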
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Cc: Dave McCracken <dmccr@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--    mm/hugetlb.c    8
1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9244971b6791..2911a364481e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -344,7 +344,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                         entry = *src_pte;
                         ptepage = pte_page(entry);
                         get_page(ptepage);
-                        add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                         set_huge_pte_at(dst, addr, dst_pte, entry);
                 }
                 spin_unlock(&src->page_table_lock);
@@ -377,10 +376,6 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
         BUG_ON(end & ~HPAGE_MASK);
 
         spin_lock(&mm->page_table_lock);
-
-        /* Update high watermark before we lower rss */
-        update_hiwater_rss(mm);
-
         for (address = start; address < end; address += HPAGE_SIZE) {
                 ptep = huge_pte_offset(mm, address);
                 if (!ptep)
@@ -395,9 +390,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
                 page = pte_page(pte);
                 list_add(&page->lru, &page_list);
-                add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
         }
-
         spin_unlock(&mm->page_table_lock);
         flush_tlb_range(vma, start, end);
         list_for_each_entry_safe(page, tmp, &page_list, lru) {
@@ -523,7 +516,6 @@ retry:
         if (!pte_none(*ptep))
                 goto backout;
 
-        add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                 && (vma->vm_flags & VM_SHARED)));
         set_huge_pte_at(mm, address, ptep, new_pte);