Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig                   |   7
 arch/x86/Kconfig.debug             |   4
 arch/x86/include/asm/ftrace.h      |  34
 arch/x86/include/asm/thread_info.h |   2
 arch/x86/include/asm/uaccess.h     |   2
 arch/x86/include/asm/uaccess_32.h  |   8
 arch/x86/include/asm/uaccess_64.h  |   6
 arch/x86/kernel/Makefile           |   6
 arch/x86/kernel/entry_32.S         |  42
 arch/x86/kernel/entry_64.S         |   5
 arch/x86/kernel/ftrace.c           | 312
 arch/x86/kernel/stacktrace.c       |  64
 arch/x86/kernel/vsyscall_64.c      |   3
 arch/x86/lib/usercopy_32.c         |   8
 arch/x86/lib/usercopy_64.c         |   4
 arch/x86/mm/Makefile               |   3
 arch/x86/mm/fault.c                |   2
 arch/x86/vdso/vclock_gettime.c     |   3
 18 files changed, 484 insertions(+), 31 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ac22bb7719f7..352f63df1d80 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,11 +29,14 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_RET_TRACER if X86_32
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select USER_STACKTRACE_SUPPORT
 
 config ARCH_DEFCONFIG
 	string
@@ -367,10 +370,10 @@ config X86_RDC321X
 	  as R-8610-(G).
 	  If you don't have one of these chips, you should say N here.
 
-config SCHED_NO_NO_OMIT_FRAME_POINTER
+config SCHED_OMIT_FRAME_POINTER
 	def_bool y
 	prompt "Single-depth WCHAN output"
-	depends on X86_32
+	depends on X86
 	help
 	  Calculate simpler /proc/<PID>/wchan values. If this option
 	  is disabled then wchan values will recurse back to the
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2a3dfbd5e677..fa013f529b74 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -186,14 +186,10 @@ config IOMMU_LEAK
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.
 
-config MMIOTRACE_HOOKS
-	bool
-
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
 	depends on DEBUG_KERNEL && PCI
 	select TRACING
-	select MMIOTRACE_HOOKS
 	help
 	  Mmiotrace traces Memory Mapped I/O access and is meant for
 	  debugging and reverse engineering. It is called from the ioremap
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9e8bc29b8b17..754a3e082f94 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -17,8 +17,40 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	 */
 	return addr - 1;
 }
-#endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for x86 */
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32.S
+ */
+extern void return_to_handler(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_RET_TRACER */
+
 #endif /* _ASM_X86_FTRACE_H */
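Note: struct ftrace_ret_stack above is the element type of a small per-thread array that behaves like a bounded stack: function entry pushes the real return address plus a timestamp, function exit pops it. A minimal standalone sketch of that behavior follows; RET_STACK_DEPTH and the *_demo names are illustrative, not from the patch (the kernel uses FTRACE_RETFUNC_DEPTH and keeps the array behind the task).

#include <stdio.h>

#define RET_STACK_DEPTH 8	/* hypothetical; kernel uses FTRACE_RETFUNC_DEPTH */

struct ret_stack_demo {
	unsigned long ret;		/* saved original return address */
	unsigned long func;		/* traced function entry address */
	unsigned long long calltime;	/* timestamp taken at entry */
};

static struct ret_stack_demo stack[RET_STACK_DEPTH];
static int curr = -1;			/* empty, like curr_ret_stack */

static int push_demo(unsigned long ret, unsigned long func,
		     unsigned long long t)
{
	if (curr == RET_STACK_DEPTH - 1)
		return -1;		/* full: caller keeps the real ret */
	curr++;
	stack[curr].ret = ret;
	stack[curr].func = func;
	stack[curr].calltime = t;
	return 0;
}

static unsigned long pop_demo(void)
{
	return stack[curr--].ret;	/* hand the saved address back */
}

int main(void)
{
	push_demo(0x1000, 0x2000, 42ULL);
	printf("restored ret: %#lx\n", pop_demo());
	return 0;
}
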
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e44d379faad2..0921b4018c11 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,6 +20,8 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
+#include <asm/ftrace.h>
+#include <asm/atomic.h>
 
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 35c54921b2e4..99192bb55a53 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -157,6 +157,7 @@ extern int __get_user_bad(void);
 	int __ret_gu;						\
 	unsigned long __val_gu;					\
 	__chk_user_ptr(ptr);					\
+	might_fault();						\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
 		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
@@ -241,6 +242,7 @@ extern void __put_user_8(void);
 	int __ret_pu;						\
 	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
+	might_fault();						\
 	__pu_val = x;						\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index d095a3aeea1b..5e06259e90e5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -82,8 +82,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -137,7 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -159,7 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f8cfd00db450..84210c479fca 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -29,6 +29,8 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -71,6 +73,8 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
+
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -113,6 +117,8 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
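Note on the might_sleep() -> might_fault() conversions above and below: a user copy may fault, and fault handling may sleep, so the old might_sleep() check still applies; the renamed hook also gives the debug machinery a place to add locking annotations for the fault path. A userspace analogue of the idea, with all names illustrative (the kernel versions live elsewhere in this series):

#include <assert.h>
#include <stdio.h>

static int in_atomic_demo;		/* stand-in for preempt_count() */

static void might_fault_demo(void)
{
	/* user copies may fault; fault handling may sleep */
	assert(!in_atomic_demo);	/* fires even when no fault occurs */
}

static void copy_to_user_demo(void)
{
	might_fault_demo();		/* deterministic misuse check */
	/* ... the actual copy would go here ... */
}

int main(void)
{
	copy_to_user_demo();		/* fine: blocking context */
	printf("ok\n");
	return 0;
}

The value of the annotation is that it catches "user copy from atomic context" bugs on every run, not only on the rare runs where the copy actually faults.
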
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b62a7667828e..af2bc36ca1c4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+ifdef CONFIG_FUNCTION_RET_TRACER
+# Don't trace __switch_to() with the return tracer; leave it to the function tracer
+CFLAGS_REMOVE_process_32.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_RET_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 28b597ef9ca1..74defe21ba42 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1157,6 +1157,9 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1180,8 +1183,15 @@ END(ftrace_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	cmpl $ftrace_stub, ftrace_function_return
+	jnz ftrace_return_caller
+#endif
 .globl ftrace_stub
 ftrace_stub:
 	ret
@@ -1200,12 +1210,42 @@ trace:
 	popl %edx
 	popl %ecx
 	popl %eax
-
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ENTRY(ftrace_return_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %edx
+	lea 0x4(%ebp), %eax
+	call prepare_ftrace_return
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+END(ftrace_return_caller)
+
+.globl return_to_handler
+return_to_handler:
+	pushl $0
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	call ftrace_return_to_handler
+	movl %eax, 0xc(%esp)
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+#endif
+
 .section .rodata,"a"
 #include "syscall_table_32.S"
 
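How the trampoline above works, in C terms: ftrace_return_caller hands prepare_ftrace_return() the address of the stack slot holding the traced function's return address (0x4(%ebp)); that helper saves the real address and overwrites the slot with return_to_handler, so the traced function's "ret" lands in the hook, which logs the exit and jumps back to the saved address. A toy userspace model of that diversion, with an explicit "slot" standing in for the stack location (all *_demo names hypothetical):

#include <stdio.h>

static unsigned long saved_ret;			/* 1-entry ret_stack stand-in */

static void return_hook_demo(void)
{
	printf("exit hooked, original ret was %#lx\n", saved_ret);
}

static void prepare_demo(unsigned long *parent)
{
	saved_ret = *parent;			/* remember the real address */
	*parent = (unsigned long)return_hook_demo;	/* divert the return */
}

int main(void)
{
	unsigned long slot = 0x1234;		/* pretend return address */

	prepare_demo(&slot);
	((void (*)(void))slot)();		/* the "ret" lands in the hook */
	return 0;
}
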
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..08aa6b10933c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -68,6 +68,8 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
 
 	/* taken from glibc */
 	subq $0x38, %rsp
@@ -103,6 +105,9 @@ END(ftrace_caller)
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
 .globl ftrace_stub
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 50ea0ac8c9bf..bb137f7297ed 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,17 @@
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -31,18 +34,12 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -56,7 +53,143 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
-int
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU,
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code.
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code.
+ * 5) Clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static int mod_code_write;		/* set when NMI should do the write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 * (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+}
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+	/* Must have in_nmi seen before reading write flag */
+	smp_mb();
+	if (mod_code_write) {
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing in_nmi */
+	smp_wmb();
+	atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+	int waited = 0;
+
+	while (atomic_read(&in_nmi)) {
+		waited = 1;
+		cpu_relax();
+	}
+
+	if (waited)
+		nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_wmb();
+
+	mod_code_write = 1;
+
+	/* Make sure write bit is visible before we wait on NMIs */
+	smp_mb();
+
+	wait_for_nmi();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_wmb();
+
+	mod_code_write = 0;
+
+	/* Make sure NMIs see the cleared bit */
+	smp_mb();
+
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
+
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
+
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -81,7 +214,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	sync_core();
@@ -89,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, addr);
+	new = ftrace_nop_replace();
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace();
+	new = ftrace_call_replace(ip, addr);
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -165,3 +321,139 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+			     unsigned long func)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+			     unsigned long *func, unsigned long *overrun)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+			 &trace.overrun);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* NMIs are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against a fault, even if it shouldn't
+	 * happen. This tool is too intrusive to
+	 * ignore such a protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=r" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
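The NMI handshake in do_ftrace_mod_code() is the subtle part of this file: the writer publishes the buffers, raises a flag, and waits out in-flight NMIs on both sides of its own write, while any NMI that lands mid-update performs the (idempotent) write itself. A compressed userspace analogue of that pattern, using C11 atomics where the kernel uses smp_wmb()/smp_mb() (seq_cst stands in for the explicit barrier pairs; "nmi" here is just another context that must never block, and all *_demo names are illustrative):

#include <stdatomic.h>
#include <string.h>

static _Atomic int mod_write;		/* "set when NMI should do the write" */
static void *mod_ip;			/* target address */
static unsigned char mod_new[5];	/* replacement bytes */

static void mod_code_demo(void)
{
	/* Duplicate writers store identical bytes, so this is idempotent. */
	memcpy(mod_ip, mod_new, sizeof(mod_new));
}

/* Called by the NMI-like context on entry. */
static void nmi_enter_demo(void)
{
	if (atomic_load(&mod_write))	/* pairs with the store below */
		mod_code_demo();	/* finish the update ourselves */
}

/* Called by the modifying context. */
static void do_mod_code_demo(void *ip, const void *newcode)
{
	mod_ip = ip;
	memcpy(mod_new, newcode, sizeof(mod_new));
	atomic_store(&mod_write, 1);	/* publish buffers, then raise flag */
	/* ...kernel: wait for in-flight NMIs, write, clear flag, wait again */
	mod_code_demo();
	atomic_store(&mod_write, 0);
}

int main(void)
{
	static unsigned char text[5];
	unsigned char call[5] = { 0xe8, 0, 0, 0, 0 };

	do_mod_code_demo(text, call);
	nmi_enter_demo();		/* same bytes again: harmless */
	return 0;
}

The safety argument is exactly the comment's "trick": every participant writes the same bytes to the same address, so any interleaving of writer and NMIs converges on the same final text.
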
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index a03e7f6d90c3..10786af95545 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/stacktrace.h>
 
 static void save_stack_warning(void *data, char *msg)
@@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
+
+struct stack_frame {
+	const void __user	*next_fp;
+	unsigned long		ret_addr;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+	int ret;
+
+	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+		return 0;
+
+	ret = 1;
+	pagefault_disable();
+	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+		ret = 0;
+	pagefault_enable();
+
+	return ret;
+}
+
+static inline void __save_stack_trace_user(struct stack_trace *trace)
+{
+	const struct pt_regs *regs = task_pt_regs(current);
+	const void __user *fp = (const void __user *)regs->bp;
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = regs->ip;
+
+	while (trace->nr_entries < trace->max_entries) {
+		struct stack_frame frame;
+
+		frame.next_fp = NULL;
+		frame.ret_addr = 0;
+		if (!copy_stack_frame(fp, &frame))
+			break;
+		if ((unsigned long)fp < regs->sp)
+			break;
+		if (frame.ret_addr) {
+			trace->entries[trace->nr_entries++] =
+				frame.ret_addr;
+		}
+		if (fp == frame.next_fp)
+			break;
+		fp = frame.next_fp;
+	}
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/*
+	 * Trace user stack if we are not a kernel thread
+	 */
+	if (current->mm) {
+		__save_stack_trace_user(trace);
+	}
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
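The walk above relies on the classic frame-pointer layout: the saved frame pointer sits at [fp] and the return address at [fp + sizeof(long)], so following next_fp ascends the caller chain; the two break conditions (frame below regs->sp, chain not advancing) guard against corrupt or cyclic frames. The same walk can be sketched in userspace (build with -O0 -fno-omit-frame-pointer; the depth cap and names are illustrative, not the kernel API):

#include <stdio.h>

struct frame {
	struct frame *next_fp;	/* saved caller frame pointer */
	unsigned long ret_addr;	/* saved return address */
};

static void __attribute__((noinline)) walk(void)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	int depth = 0;

	while (fp && fp->ret_addr && depth++ < 8) {
		printf("ret %#lx\n", fp->ret_addr);
		if (fp->next_fp <= fp)	/* stack grows down: chain must ascend */
			break;
		fp = fp->next_fp;
	}
}

int main(void)
{
	walk();
	return 0;
}
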
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 0b8b6690a86d..6f3d3d4cd973 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -17,6 +17,9 @@
  * want per guest time just set the kernel.vsyscall64 sysctl to 0.
  */
 
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 9e68075544f6..4a20b2f9a381 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -39,7 +39,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #define __do_strncpy_from_user(dst, src, count, res)	\
 do {							\
 	int __d0, __d1, __d2;				\
-	might_sleep();					\
+	might_fault();					\
 	__asm__ __volatile__(				\
 		"	testl %1,%1\n"			\
 		"	jz 2f\n"			\
@@ -126,7 +126,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 #define __do_clear_user(addr,size)			\
 do {							\
 	int __d0;					\
-	might_sleep();					\
+	might_fault();					\
 	__asm__ __volatile__(				\
 		"0:	rep; stosl\n"			\
 		"	movl %2,%0\n"			\
@@ -155,7 +155,7 @@ do {							\
 unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		__do_clear_user(to, n);
 	return n;
@@ -197,7 +197,7 @@ long strnlen_user(const char __user *s, long n)
 	unsigned long mask = -__addr_ok(s);
 	unsigned long res, tmp;
 
-	might_sleep();
+	might_fault();
 
 	__asm__ __volatile__(
 		"	testl %0, %0\n"
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7c718b..64d6c84e6353 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -15,7 +15,7 @@
 #define __do_strncpy_from_user(dst,src,count,res)	\
 do {							\
 	long __d0, __d1, __d2;				\
-	might_sleep();					\
+	might_fault();					\
 	__asm__ __volatile__(				\
 		"	testq %1,%1\n"			\
 		"	jz 2f\n"			\
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 unsigned long __clear_user(void __user *addr, unsigned long size)
 {
 	long __d0;
-	might_sleep();
+	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fea4565ff576..d8cc96a2738f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP)	+= dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
-obj-$(CONFIG_MMIOTRACE_HOOKS)	+= kmmio.o
 obj-$(CONFIG_MMIOTRACE)		+= mmiotrace.o
-mmiotrace-y			:= pf_in.o mmio-mod.o
+mmiotrace-y			:= kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
 obj-$(CONFIG_NUMA)		+= numa_$(BITS).o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 31e8730fa246..4152d3c3b138 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -53,7 +53,7 @@
 
 static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
 {
-#ifdef CONFIG_MMIOTRACE_HOOKS
+#ifdef CONFIG_MMIOTRACE
 	if (unlikely(is_kmmio_active()))
 		if (kmmio_handler(regs, addr) == 1)
 			return -1;
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 1ef0f90813d6..d9d35824c56f 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -9,6 +9,9 @@
  * Also alternative() doesn't work.
  */
 
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/kernel.h>
 #include <linux/posix-timers.h>
 #include <linux/time.h>