author     ztong <ztong@cs.unc.edu>  2021-02-03 18:15:37 -0500
committer  ztong <ztong@cs.unc.edu>  2021-02-03 18:15:37 -0500
commit     ffcea58b914c7febd2d3126552dee41216d3a203 (patch)
tree       2e63e893807fb908cfd398c9a63d0412b019ea12 /arch/x86
parent     8b3b8657637b5a03b2f12f260516be964e6fc228 (diff)
Added KUTrace Support
Moved sched_litmus back to the top scheduling class
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig              |   7
-rw-r--r--  arch/x86/entry/common.c       |  24
-rw-r--r--  arch/x86/kernel/acpi/cstate.c |   5
-rw-r--r--  arch/x86/kernel/apic/apic.c   |  22
-rw-r--r--  arch/x86/kernel/apic/ipi.c    |  11
-rw-r--r--  arch/x86/kernel/irq.c         |  12
-rw-r--r--  arch/x86/kernel/irq_work.c    |   8
-rw-r--r--  arch/x86/kernel/smp.c         |  18
-rw-r--r--  arch/x86/mm/fault.c           |   8
-rw-r--r--  arch/x86/mm/tlb.c             |   2

10 files changed, 117 insertions, 0 deletions
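
Every interrupt handler instrumented in this patch follows the same shape: one kutrace1() entry event right after the architecture's interrupt-entry bookkeeping, and a matching return event just before exit, so the trace records both when the handler ran and for how long. A minimal sketch of that shape follows (not part of this commit; kutrace1(), KUTRACE_IRQ, and KUTRACE_IRQRET come from include/linux/kutrace.h elsewhere in the KUTrace patch set, outside this arch/x86 diffstat, and SOME_VECTOR and the handler name are placeholders for the real vectors such as LOCAL_TIMER_VECTOR):

	/* Illustrative only: the instrumentation pattern repeated below. */
	__visible void __irq_entry smp_some_vector_interrupt(struct pt_regs *regs)
	{
		entering_ack_irq();

		/* entry event: event number = IRQ base + vector, argument 0 */
		kutrace1(KUTRACE_IRQ + SOME_VECTOR, 0);

		/* ... existing handler body, unchanged ... */

		/* matching return event, emitted just before leaving the IRQ */
		kutrace1(KUTRACE_IRQRET + SOME_VECTOR, 0);

		exiting_irq();
	}
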
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3765164809c5..aa63a2cafc1f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,6 +32,13 @@ config X86_64
 	select SWIOTLB
 	select ARCH_HAS_SYSCALL_WRAPPER
 
+config KUTRACE
+	bool "kernel/user tracing, dsites"
+	depends on 64BIT
+	def_bool y
+	---help---
+	  Enables kernel/user tracing patches
+
 config FORCE_DYNAMIC_FTRACE
 	def_bool y
 	depends on X86_32
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3f8e22615812..24f30bb65ac4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -37,6 +37,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+#include <linux/kutrace.h>
+
 #ifdef CONFIG_CONTEXT_TRACKING
 /* Called on entry from user mode with IRQs off. */
 __visible inline void enter_from_user_mode(void)
@@ -287,7 +289,29 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 
 	if (likely(nr < NR_syscalls)) {
 		nr = array_index_nospec(nr, NR_syscalls);
+		/* dsites 2019.03.05 track all syscalls and normal returns */
+		/* Pass in low 16 bits of call arg0 and return value */
+		kutrace1(KUTRACE_SYSCALL64 | kutrace_map_nr(nr), regs->di & 0xFFFFul);
+
 		regs->ax = sys_call_table[nr](regs);
+
+		/* dsites 2019.03.05 track all syscalls and normal returns */
+		/* Pass in low 16 bits of return value */
+		kutrace1(KUTRACE_SYSRET64 | kutrace_map_nr(nr), regs->ax & 0xFFFFul);
+#ifdef CONFIG_KUTRACE
+	} else if ((nr == __NR_kutrace_control) &&
+		   (kutrace_global_ops.kutrace_trace_control != NULL)) {
+		BUILD_BUG_ON_MSG(NR_syscalls > __NR_kutrace_control,
+				 "__NR_kutrace_control is too small");
+		BUILD_BUG_ON_MSG(16 > TASK_COMM_LEN,
+				 "TASK_COMM_LEN is less than 16");
+
+		/* Calling kutrace_control(u64 command, u64 arg) */
+		/* see arch/x86/calling.h: */
+		/* syscall arg0 in rdi (command), arg1 in rsi (arg) */
+		regs->ax = (*kutrace_global_ops.kutrace_trace_control)(
+			regs->di, regs->si);
+#endif
 #ifdef CONFIG_X86_X32_ABI
 	} else if (likely((nr & __X32_SYSCALL_BIT) &&
 			  (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
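
For reference, a hypothetical user-space caller for the kutrace_control syscall path added above (a sketch, not from this patch): __NR_kutrace_control and the command values are defined elsewhere in the KUTrace patch set, not in this arch/x86 diff, so they are assumptions here; only the register mapping (arg0 = command in rdi, arg1 = arg in rsi) comes from the comment in do_syscall_64.

	#define _GNU_SOURCE
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* __NR_kutrace_control must come from the patched kernel's headers;
	   it is not defined anywhere in this arch/x86 diff. */
	static int64_t kutrace_control(uint64_t command, uint64_t arg)
	{
		/* arg0 -> rdi (command), arg1 -> rsi (arg), per do_syscall_64 above */
		return syscall(__NR_kutrace_control, command, arg);
	}
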
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index caf2edccbad2..503712c45066 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -16,6 +16,8 @@
 #include <asm/mwait.h>
 #include <asm/special_insns.h>
 
+#include <linux/kutrace.h>
+
 /*
  * Initialize bm_flags based on the CPU cache properties
  * On SMP it depends on cache configuration
@@ -186,6 +188,9 @@ void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 	struct cstate_entry *percpu_entry;
 
 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+
+	kutrace1(KUTRACE_MWAIT, percpu_entry->states[cx->index].eax);
+
 	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
 			      percpu_entry->states[cx->index].ecx);
 }
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2b0faf86da1b..4d6a2a7569db 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -60,6 +60,9 @@
 #include <asm/intel-family.h>
 #include <asm/irq_regs.h>
 
+#include <linux/kutrace.h>
+#include <linux/cpufreq.h>
+
 unsigned int num_processors;
 
 unsigned disabled_cpus;
@@ -1131,9 +1134,28 @@ __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
 	entering_ack_irq();
+
+	kutrace1(KUTRACE_IRQ + LOCAL_TIMER_VECTOR, 0);
+
 	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
 	local_apic_timer_interrupt();
 	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
+
+	kutrace1(KUTRACE_IRQRET + LOCAL_TIMER_VECTOR, 0);
+
+	/* dsites 2020.05.02 Trace current CPU frequency in MHz */
+	/* NOT FINISHED. Recent Intel only. Not AMD. Not ARM */
+	//{
+	//	u64 mhz;
+	//	kutrace_freq(mhz);
+	//	kutrace1(KUTRACE_PSTATE, mhz);
+	//}
+
+	/* dsites 2020.01.29 Trace return address -- we are also a profiler now */
+	if (kutrace_tracing) {
+		(*kutrace_global_ops.kutrace_trace_2)(KUTRACE_PC, 0, regs->ip);
+	}
+
 	exiting_irq();
 
 	set_irq_regs(old_regs);
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 6ca0f91372fd..b8fac56af4d8 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -5,6 +5,8 @@
 
 #include "local.h"
 
+#include <linux/kutrace.h>
+
 DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);
 
 #ifdef CONFIG_SMP
@@ -67,16 +69,25 @@ void native_smp_send_reschedule(int cpu)
 		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
 		return;
 	}
+
+	kutrace1(KUTRACE_IPI, cpu);
+
 	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
+	kutrace1(KUTRACE_IPI, cpu);
+
 	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
 {
+	/* dsites 2019.03.06 */
+	/* Use CPU 0 as a placeholder to indicate when mask was sent */
+	kutrace1(KUTRACE_IPI, 0);
+
 	if (static_branch_likely(&apic_use_ipi_shorthand)) {
 		unsigned int cpu = smp_processor_id();
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 21efee32e2b1..e643db0fb67e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -23,6 +23,8 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
 
+#include <linux/kutrace.h>
+
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
@@ -239,6 +241,8 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 
 	entering_irq();
 
+	kutrace1(KUTRACE_IRQ + (vector & 0xFF), 0);
+
 	/* entering_irq() tells RCU that we're not quiescent. Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
@@ -260,6 +264,8 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 		}
 	}
 
+	kutrace1(KUTRACE_IRQRET + (vector & 0xFF), 0);
+
 	exiting_irq();
 
 	set_irq_regs(old_regs);
@@ -277,11 +283,17 @@ __visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	entering_ack_irq();
+
+	kutrace1(KUTRACE_IRQ + X86_PLATFORM_IPI_VECTOR, 0);
+
 	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
 	inc_irq_stat(x86_platform_ipis);
 	if (x86_platform_ipi_callback)
 		x86_platform_ipi_callback();
 	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
+
+	kutrace1(KUTRACE_IRQRET + X86_PLATFORM_IPI_VECTOR, 0);
+
 	exiting_irq();
 	set_irq_regs(old_regs);
 }
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 80bee7695a20..964610b2d6dc 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -12,14 +12,22 @@
 #include <asm/trace/irq_vectors.h>
 #include <linux/interrupt.h>
 
+#include <linux/kutrace.h>
+
 #ifdef CONFIG_X86_LOCAL_APIC
 __visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
 {
 	ipi_entering_ack_irq();
+
+	kutrace1(KUTRACE_IRQ + IRQ_WORK_VECTOR, 0);
+
 	trace_irq_work_entry(IRQ_WORK_VECTOR);
 	inc_irq_stat(apic_irq_work_irqs);
 	irq_work_run();
 	trace_irq_work_exit(IRQ_WORK_VECTOR);
+
+	kutrace1(KUTRACE_IRQRET + IRQ_WORK_VECTOR, 0);
+
 	exiting_irq();
 }
 
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index b8d4e9c3c070..d9d8fed828e4 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -33,6 +33,8 @@
 #include <asm/kexec.h>
 #include <asm/virtext.h>
 
+#include <linux/kutrace.h>
+
 /*
  * Some notes on x86 processor bugs affecting SMP operation:
 *
@@ -226,6 +228,9 @@ static void native_stop_other_cpus(int wait)
 __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
+
+	kutrace1(KUTRACE_IRQ + RESCHEDULE_VECTOR, 0);
+
 	inc_irq_stat(irq_resched_count);
 	kvm_set_cpu_l1tf_flush_l1d();
 
@@ -242,25 +247,38 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
 		return;
 	}
 	scheduler_ipi();
+	/* dsites 2019.03.06 continued in sched/core.c */
 }
 
 __visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
 {
 	ipi_entering_ack_irq();
+
+	kutrace1(KUTRACE_IRQ + CALL_FUNCTION_VECTOR, 0);
+
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	inc_irq_stat(irq_call_count);
 	generic_smp_call_function_interrupt();
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
+
+	kutrace1(KUTRACE_IRQRET + CALL_FUNCTION_VECTOR, 0);
+
 	exiting_irq();
 }
 
 __visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
 {
	ipi_entering_ack_irq();
+
+	kutrace1(KUTRACE_IRQ + CALL_FUNCTION_SINGLE_VECTOR, 0);
+
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	inc_irq_stat(irq_call_count);
 	generic_smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
+
+	kutrace1(KUTRACE_IRQRET + CALL_FUNCTION_SINGLE_VECTOR, 0);
+
 	exiting_irq();
 }
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9ceacd1156db..6bc4011901a0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -33,6 +33,8 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
 
+#include <linux/kutrace.h>
+
 /*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
@@ -1527,7 +1529,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long addr
 
 	prev_state = exception_enter();
 	trace_page_fault_entries(regs, error_code, address);
+
+	kutrace1(KUTRACE_TRAP + KUTRACE_PAGEFAULT, 0);
+
 	__do_page_fault(regs, error_code, address);
+
+	kutrace1(KUTRACE_TRAPRET + KUTRACE_PAGEFAULT, 0);
+
 	exception_exit(prev_state);
 }
 NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e6a9edc5baaf..36fac9e7b37f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -18,6 +18,8 @@
 
 #include "mm_internal.h"
 
+#include <linux/kutrace.h>
+
 /*
  * TLB flushing, formerly SMP-only
  *	c/o Linus Torvalds.