author		Mel Gorman <mgorman@suse.de>	2013-10-07 06:29:14 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-10-09 06:40:44 -0400
commit		25cbbef1924299249756bc4030fcb2436c019813 (patch)
tree		9369e1194bf3ddc91a5298b04fe590845851565b /mm
parent		4591ce4f2d22dc9de7a6719161ce409b5fd1caac (diff)
mm: numa: Trap pmd hinting faults only if we would otherwise trap PTE faults
Base page PMD faulting is meant to batch handle NUMA hinting faults from
PTEs. However, even if no PTE faults would ever be handled within a
range, the kernel still traps PMD hinting faults. This patch avoids that
overhead.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-37-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
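To illustrate the batching logic the patch guards, here is a minimal
userspace sketch, not kernel code: pte_entry, scan_pte_range and
mark_pmd_numa are hypothetical stand-ins for the kernel's types and
helpers. The point it models is the new this_pages check: the PMD is
marked for a batched NUMA hinting fault only when the PTE-level pass
actually updated at least one entry, so ranges that would never trap
PTE faults no longer trap PMD faults either.

/*
 * Sketch only: models the guard added in change_pmd_range(), under the
 * assumption that a PMD should batch PTE hinting faults only when some
 * PTE in its range would actually fault.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pte_entry {
	bool present;	/* would a hinting fault ever be taken here? */
	int nid;	/* node the backing page lives on */
};

/* Count the PTEs we would update; report whether all share one node. */
static unsigned long scan_pte_range(const struct pte_entry *ptes, size_t n,
				    bool *all_same_node)
{
	unsigned long changed = 0;
	int last_nid = -1;

	*all_same_node = true;
	for (size_t i = 0; i < n; i++) {
		if (!ptes[i].present)
			continue;
		if (last_nid >= 0 && ptes[i].nid != last_nid)
			*all_same_node = false;
		last_nid = ptes[i].nid;
		changed++;
	}
	return changed;
}

static void mark_pmd_numa(void)
{
	puts("PMD marked: one batched hinting fault covers the range");
}

int main(void)
{
	struct pte_entry empty[4] = { 0 };	/* nothing mapped */
	struct pte_entry populated[2] = {
		{ .present = true, .nid = 0 },
		{ .present = true, .nid = 0 },
	};
	bool all_same_node;
	unsigned long this_pages;

	/* Before the patch the PMD was marked unconditionally; now an
	 * empty range is skipped because this_pages is zero. */
	this_pages = scan_pte_range(empty, 4, &all_same_node);
	if (this_pages && all_same_node)
		mark_pmd_numa();	/* not reached */

	/* A populated single-node range is still batched as before. */
	this_pages = scan_pte_range(populated, 2, &all_same_node);
	if (this_pages && all_same_node)
		mark_pmd_numa();	/* reached */

	return 0;
}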
Diffstat (limited to 'mm')
-rw-r--r--	mm/mprotect.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f0b087d1069c..5aae39017d6d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -146,6 +146,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 	pmd = pmd_offset(pud, addr);
 	do {
+		unsigned long this_pages;
+
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
@@ -165,8 +167,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		pages += change_pte_range(vma, pmd, addr, next, newprot,
+		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
 				 dirty_accountable, prot_numa, &all_same_nidpid);
+		pages += this_pages;
 
 		/*
 		 * If we are changing protections for NUMA hinting faults then
@@ -174,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_nidpid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 