author		Dave Hansen <dave@sr71.net>	2014-07-31 11:40:59 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-07-31 11:48:51 -0400
commit		d17d8f9dedb9dd76fd540a5c497101529d9eb25a (patch)
tree		9419fa5866856e22eb1fc26202085904556f346c /arch/x86/mm
parent		a23421f111bb256cdbf23dcfe15f35567ab88e98 (diff)
x86/mm: Add tracepoints for TLB flushes
We don't have any good way to figure out what kinds of flushes are
being attempted.  Right now, we can try to use the vm counters, but
those only tell us what we actually did with the hardware (one-by-one
vs full) and don't tell us what was actually _requested_.

This allows us to select out "interesting" TLB flushes that we might
want to optimize (like the ranged ones) and ignore the ones that we
have very little control over (the ones at context switch).

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140731154059.4C96CBA5@viggo.jf.intel.com
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
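The flush-reason constants used in the hunks below (TLB_FLUSH_ON_TASK_SWITCH,
TLB_REMOTE_SHOOTDOWN, TLB_LOCAL_SHOOTDOWN, TLB_LOCAL_MM_SHOOTDOWN) are defined
outside arch/x86/mm and therefore do not appear in this diffstat. As a rough
sketch inferred only from the call sites in this patch, they presumably form a
small enum along these lines (the ordering and any extra members are
assumptions):

/*
 * Sketch only: flush-reason values as referenced by the trace_tlb_flush()
 * calls in this patch; the real definition lives in a shared header that
 * is not part of this diffstat.
 */
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	NR_TLB_FLUSH_REASONS,
};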
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/init.c	7
-rw-r--r--	arch/x86/mm/tlb.c	11
2 files changed, 16 insertions, 2 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f97130618113..66dba36f2343 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -18,6 +18,13 @@
 #include <asm/dma.h>		/* for MAX_DMA_PFN */
 #include <asm/microcode.h>
 
+/*
+ * We need to define the tracepoints somewhere, and tlb.c
+ * is only compiled when SMP=y.
+ */
+#define CREATE_TRACE_POINTS
+#include <trace/events/tlb.h>
+
 #include "mm_internal.h"
 
 static unsigned long __initdata pgt_buf_start;
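The <trace/events/tlb.h> header included above sits outside this diffstat
(limited to 'arch/x86/mm'), so the event definition itself is not shown here.
A hedged sketch of what a matching TRACE_EVENT could look like, inferred from
the two-argument trace_tlb_flush() calls in tlb.c (field names and the print
format are assumptions, not the actual header):

/* Sketch only: a tlb_flush event matching the calls in this patch. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tlb

#if !defined(_TRACE_TLB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TLB_H

#include <linux/tracepoint.h>

TRACE_EVENT(tlb_flush,

	TP_PROTO(int reason, unsigned long pages),
	TP_ARGS(reason, pages),

	TP_STRUCT__entry(
		__field(int, reason)
		__field(unsigned long, pages)
	),

	TP_fast_assign(
		__entry->reason	= reason;
		__entry->pages	= pages;
	),

	TP_printk("pages=%lu reason=%d", __entry->pages, __entry->reason)
);

#endif /* _TRACE_TLB_H */

/* This part must be outside protection */
#include <trace/define_trace.h>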
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index add5a0fc3c5f..6f00ecb9feeb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -49,6 +49,7 @@ void leave_mm(int cpu)
 	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
 		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
 		load_cr3(swapper_pg_dir);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	}
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -107,15 +108,19 @@ static void flush_tlb_func(void *info)
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-		if (f->flush_end == TLB_FLUSH_ALL)
+		if (f->flush_end == TLB_FLUSH_ALL) {
 			local_flush_tlb();
-		else {
+			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
+		} else {
 			unsigned long addr;
+			unsigned long nr_pages =
+				(f->flush_end - f->flush_start) / PAGE_SIZE;
 			addr = f->flush_start;
 			while (addr < f->flush_end) {
 				__flush_tlb_single(addr);
 				addr += PAGE_SIZE;
 			}
+			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
 		}
 	} else
 		leave_mm(smp_processor_id());
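The nr_pages value computed above is what the tracepoint reports for ranged
remote shootdowns. As a minimal illustration of consuming the new event from
C, here is a sketch of a probe module; it assumes the tracepoint were also
exported to modules (e.g. via EXPORT_TRACEPOINT_SYMBOL_GPL), which this patch
does not do, and the probe signature mirrors the TP_PROTO assumed earlier:

#include <linux/module.h>
#include <trace/events/tlb.h>

/* Probe called on every tlb_flush event; signature is (void *data, TP_PROTO...). */
static void probe_tlb_flush(void *ignore, int reason, unsigned long pages)
{
	pr_info("tlb_flush: reason=%d pages=%lu\n", reason, pages);
}

static int __init tlb_probe_init(void)
{
	return register_trace_tlb_flush(probe_tlb_flush, NULL);
}

static void __exit tlb_probe_exit(void)
{
	unregister_trace_tlb_flush(probe_tlb_flush, NULL);
	tracepoint_synchronize_unregister();
}

module_init(tlb_probe_init);
module_exit(tlb_probe_exit);
MODULE_LICENSE("GPL");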
@@ -153,6 +158,7 @@ void flush_tlb_current_task(void)
 
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
+	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
@@ -191,6 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			__flush_tlb_single(addr);
 		}
 	}
+	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
 out:
 	if (base_pages_to_flush == TLB_FLUSH_ALL) {
 		start = 0UL;