about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2014-01-21 17:33:18 -0500
committerIngo Molnar <mingo@kernel.org>2014-01-25 03:10:42 -0500
commit15aa368255f249df0b2af630c9487bb5471bd7da (patch)
treef78e19005bb5f270f1de1bf3186bfc017c9ff170
parentec65993443736a5091b68e80ff1734548944a4b8 (diff)
x86/mm: Clean up inconsistencies when flushing TLB ranges
NR_TLB_LOCAL_FLUSH_ALL is not always accounted for correctly and the comparison with total_vm is done before taking tlb_flushall_shift into account. Clean it up.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Alex Shi <alex.shi@linaro.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Link: http://lkml.kernel.org/n/tip-Iz5gcahrgskIldvukulzi0hh@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/mm/tlb.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 05446c1cccfe..5176526ddd59 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -189,6 +189,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
189{ 189{
190 unsigned long addr; 190 unsigned long addr;
191 unsigned act_entries, tlb_entries = 0; 191 unsigned act_entries, tlb_entries = 0;
192 unsigned long nr_base_pages;
192 193
193 preempt_disable(); 194 preempt_disable();
194 if (current->active_mm != mm) 195 if (current->active_mm != mm)
@@ -210,18 +211,17 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
210 tlb_entries = tlb_lli_4k[ENTRIES]; 211 tlb_entries = tlb_lli_4k[ENTRIES];
211 else 212 else
212 tlb_entries = tlb_lld_4k[ENTRIES]; 213 tlb_entries = tlb_lld_4k[ENTRIES];
214
213 /* Assume all of TLB entries was occupied by this task */ 215 /* Assume all of TLB entries was occupied by this task */
214 act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm; 216 act_entries = tlb_entries >> tlb_flushall_shift;
217 act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
218 nr_base_pages = (end - start) >> PAGE_SHIFT;
215 219
216 /* tlb_flushall_shift is on balance point, details in commit log */ 220 /* tlb_flushall_shift is on balance point, details in commit log */
217 if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) { 221 if (nr_base_pages > act_entries || has_large_page(mm, start, end)) {
218 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 222 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
219 local_flush_tlb(); 223 local_flush_tlb();
220 } else { 224 } else {
221 if (has_large_page(mm, start, end)) {
222 local_flush_tlb();
223 goto flush_all;
224 }
225 /* flush range by one by one 'invlpg' */ 225 /* flush range by one by one 'invlpg' */
226 for (addr = start; addr < end; addr += PAGE_SIZE) { 226 for (addr = start; addr < end; addr += PAGE_SIZE) {
227 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); 227 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);