author		Mel Gorman <mgorman@suse.de>	2014-01-21 17:33:16 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-25 03:10:41 -0500
commit		ec65993443736a5091b68e80ff1734548944a4b8 (patch)
tree		a2f9e960f8b9113012118de382ffcda741a1feaf /arch/x86
parent		2993ae3305ad10b41e0d0bc2662f7754ee8e30fa (diff)
mm, x86: Account for TLB flushes only when debugging
Bisection between 3.11 and 3.12 fingered commit 9824cf97 ("mm: vmstats:
tlb flush counters") to cause overhead problems.

The counters are undeniably useful but how often do we really need to
debug TLB flush related issues?  It does not justify taking the penalty
everywhere so make it a debugging option.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Alex Shi <alex.shi@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-XzxjntugxuwpxXhcrxqqh53b@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
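The call sites in the diff below switch from count_vm_event() to a
count_vm_tlb_event() wrapper that compiles to a no-op unless TLB flush
debugging is configured. The wrapper's definition is in the generic part
of the patch, outside the arch/x86 diffstat shown here; a minimal sketch
of the idea, assuming the gating option is the x86 CONFIG_DEBUG_TLBFLUSH
and the definition lives in include/linux/vmstat.h:

/*
 * Sketch only: the real definition is not part of the arch/x86
 * diffstat below, and CONFIG_DEBUG_TLBFLUSH as the gating option
 * is an assumption here.
 */
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)		count_vm_event(x)
#define count_vm_tlb_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)		do {} while (0)
#define count_vm_tlb_events(x, y)	do { (void)(y); } while (0)
#endif

With the option off, the preprocessor removes the per-cpu counter
increment entirely, which is the penalty the changelog refers to.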
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/tlbflush.h    |  6
-rw-r--r--	arch/x86/kernel/cpu/mtrr/generic.c |  4
-rw-r--r--	arch/x86/mm/tlb.c                  | 14
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90babc245..04905bfc508b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 }
 
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
  */
 static inline void __flush_tlb_up(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 }
 
 static inline void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb_all();
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2c3e4f..0e25a1bc5ab5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 	}
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Save MTRR state */
@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3bbac8..05446c1cccfe 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	info.flush_start = start;
 	info.flush_end = end;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -151,7 +151,7 @@ void flush_tlb_current_task(void)
 
 	preempt_disable();
 
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -215,7 +215,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	/* tlb_flushall_shift is on balance point, details in commit log */
 	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
 		if (has_large_page(mm, start, end)) {
@@ -224,7 +224,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		}
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
 
@@ -262,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 
 static void do_flush_tlb_all(void *info)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -270,7 +270,7 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
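When the debug option is enabled, these counters remain visible in
/proc/vmstat as before (names of the form nr_tlb_remote_flush,
nr_tlb_local_flush_one, and so on); when it is disabled, the sites
patched above cost nothing. A small user-space sketch for watching
them; the exact counter names are an assumption:

/*
 * Print TLB flush counters from /proc/vmstat. On kernels built
 * without the debug option, no lines will match.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "tlb"))	/* e.g. nr_tlb_remote_flush */
			fputs(line, stdout);
	fclose(f);
	return 0;
}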