author		Andrea Arcangeli <andrea@suse.de>	2005-10-29 21:16:48 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:43 -0400
commit		1a44e149084d772a1bcf4cdbdde8a013a8a1cfde (patch)
tree		b3f682ce8df89edb9740fdd5c178df5accc49736 /mm/memory.c
parent		2e9b367c2273ed21c9852a04d90944d472c4f3e6 (diff)
[PATCH] .text page fault SMP scalability optimization
We had a problem on ppc64 where, with more than 4 threads, a large system
wouldn't scale well while faulting in the .text (most of the time was spent
in the kernel even though it was a userland compute-intensive app). The
reason is the useless overwrite of the same pte from all CPUs.

I fixed it this way (verified on an older kernel, but the forward port is
almost identical). This will benefit all archs, not just ppc64.

Signed-off-by: Andrea Arcangeli <andrea@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
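Not part of the commit: a minimal pthreads sketch of the kind of workload
described above (hypothetical; the thread count and loop bound are arbitrary).
All threads execute the same compute-bound function, so the .text page
backing it is faulted in concurrently from every CPU, and before this patch
each such fault rewrote an identical pte under the page table lock. Run
under time(1) on a large SMP box, a pre-patch kernel would show the
disproportionate system-time share described in the report.

/* reproducer.c -- hypothetical sketch, not from this patch */
#include <pthread.h>
#include <stdlib.h>

#define NTHREADS 8	/* "more than 4 threads" per the report */

static void *spin(void *arg)
{
	volatile unsigned long sum = 0;
	unsigned long i;

	(void)arg;
	/*
	 * Compute-intensive loop; entering this function is what
	 * faults in the .text page that backs it.
	 */
	for (i = 0; i < (1UL << 28); i++)
		sum += i;
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		if (pthread_create(&tid[i], NULL, spin, NULL))
			exit(1);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}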
Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d68421dd64ef..0f60baf6f69b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1980,9 +1980,10 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		pte_t *pte, pmd_t *pmd, int write_access)
 {
 	pte_t entry;
+	pte_t old_entry;
 	spinlock_t *ptl;
 
-	entry = *pte;
+	old_entry = entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (!vma->vm_ops || !vma->vm_ops->nopage)
@@ -2009,9 +2010,20 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	ptep_set_access_flags(vma, address, pte, entry, write_access);
-	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
+	if (!pte_same(old_entry, entry)) {
+		ptep_set_access_flags(vma, address, pte, entry, write_access);
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+	} else {
+		/*
+		 * This is needed only for protection faults but the arch code
+		 * is not yet telling us if this is a protection fault or not.
+		 * This still avoids useless tlb flushes for .text page faults
+		 * with threads.
+		 */
+		if (write_access)
+			flush_tlb_page(vma, address);
+	}
 unlock:
 	pte_unmap_unlock(pte, ptl);
 	return VM_FAULT_MINOR;
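The skip path hinges on pte_same(). For reference, the generic fallback is
roughly the one-liner below (as in include/asm-generic/pgtable.h of this
era; architectures with multi-word ptes override it), so the check costs a
single value compare, which is what makes it cheap enough to run on every
minor fault:

/*
 * Generic pte_same() fallback, shown for reference only. When a
 * concurrent fault has already set the young/dirty bits, old_entry and
 * entry compare equal and the pte write, update_mmu_cache() and
 * lazy_mmu_prot_update() are all skipped.
 */
#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A, B)	(pte_val(A) == pte_val(B))
#endif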