Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |  2
-rw-r--r--  arch/x86/kernel/dumpstack.c                |  6
-rw-r--r--  arch/x86/kernel/ftrace.c                   | 49
-rw-r--r--  arch/x86/kernel/process.c                  |  2
4 files changed, 19 insertions, 40 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4b1c319d30c3..7ed925edf4d2 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6b1f6f6f8661..077c9ea655fc 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/ftrace.h>
 
 #include <asm/stacktrace.h>
 
@@ -195,6 +196,11 @@ unsigned __kprobes long oops_begin(void)
 	int cpu;
 	unsigned long flags;
 
+	/* notify the hw-branch tracer so it may disable tracing and
+	   add the last trace to the trace buffer -
+	   the earlier this happens, the more useful the trace. */
+	trace_hw_branch_oops();
+
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
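Note that dumpstack.c can call trace_hw_branch_oops() unconditionally because <linux/ftrace.h> is expected to supply an empty stub when the hw-branch tracer is not configured in. A minimal sketch of that pattern follows; the config symbol and declarations are assumptions for illustration, not lines from this commit:

	/*
	 * Assumed shape of the hook's declaration: a real function when the
	 * hw-branch tracer is built in, an empty inline stub otherwise, so
	 * callers such as oops_begin() need no #ifdef of their own.
	 */
	#ifdef CONFIG_HW_BRANCH_TRACER
	extern void trace_hw_branch_oops(void);
	#else
	static inline void trace_hw_branch_oops(void) { }
	#endif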
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9d549e4fe880..f20f49f7d244 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -82,7 +82,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
 static int mod_code_write;		/* set when NMI should do the write */
 static void *mod_code_ip;		/* holds the IP to write to */
@@ -115,8 +115,8 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-	atomic_inc(&in_nmi);
-	/* Must have in_nmi seen before reading write flag */
+	atomic_inc(&nmi_running);
+	/* Must have nmi_running seen before reading write flag */
 	smp_mb();
 	if (mod_code_write) {
 		ftrace_mod_code();
@@ -126,22 +126,21 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-	/* Finish all executions before clearing in_nmi */
+	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
-	atomic_dec(&in_nmi);
+	atomic_dec(&nmi_running);
 }
 
 static void wait_for_nmi(void)
 {
-	int waited = 0;
+	if (!atomic_read(&nmi_running))
+		return;
 
-	while (atomic_read(&in_nmi)) {
-		waited = 1;
+	do {
 		cpu_relax();
-	}
+	} while (atomic_read(&nmi_running));
 
-	if (waited)
-		nmi_wait_count++;
+	nmi_wait_count++;
 }
 
 static int
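For context, these NMI hooks are one half of the code-patching handshake in this file. A simplified sketch of the writer side is shown below; it is a paraphrase, written on the assumption that a mod_code_newcode pointer exists alongside mod_code_ip, and is not the literal kernel code:

	/*
	 * Sketch of the writer-side protocol: publish what to patch, raise
	 * mod_code_write, and wait out in-flight NMIs. An NMI that arrives
	 * while the flag is set performs the write itself in
	 * ftrace_nmi_enter() -> ftrace_mod_code(), so it never executes
	 * half-modified text.
	 */
	static int do_mod_code_sketch(void *ip, void *new_code)
	{
		mod_code_ip = ip;		/* publish the patch target */
		mod_code_newcode = new_code;	/* assumed companion to mod_code_ip */
		mod_code_write = 1;		/* ask NMIs to do the write */

		smp_mb();			/* pairs with smp_mb() in ftrace_nmi_enter() */
		wait_for_nmi();			/* drain NMIs that raced with the flag */

		ftrace_mod_code();		/* do the write from normal context too */

		mod_code_write = 0;
		smp_mb();
		wait_for_nmi();			/* make sure no NMI is still mid-write */

		return mod_code_status;
	}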
@@ -368,25 +367,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 	return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-	atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-	atomic_dec(&in_nmi);
-}
-
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
 /* Add a function return address to the trace stack on thread info.*/
@@ -476,7 +456,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 				&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (unlikely(atomic_read(&in_nmi)))
+	if (unlikely(in_nmi()))
 		return;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
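With the !CONFIG_DYNAMIC_FTRACE stub counter removed above, prepare_ftrace_return() relies on the generic in_nmi() test instead of a file-local atomic. As an assumption based on <linux/hardirq.h> of this kernel generation, that helper is roughly:

	/*
	 * Assumed definition, for context only: nmi_enter()/nmi_exit()
	 * track NMI nesting in preempt_count(), so any code can ask
	 * "am I in NMI context?" without keeping its own counter.
	 */
	#define in_nmi()	(preempt_count() & NMI_MASK)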
@@ -513,13 +493,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	if (unlikely(!__kernel_text_address(old))) {
-		ftrace_graph_stop();
-		*parent = old;
-		WARN_ON(1);
-		return;
-	}
-
 	calltime = cpu_clock(raw_smp_processor_id());
 
 	if (push_return_trace(old, calltime,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e68bb9e30864..026819ffcb0c 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -8,7 +8,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 
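The include changes in acpi-cpufreq.c (above) and process.c are the same move: these files only need the power tracing hooks, which now come from <trace/power.h> rather than pulling in all of <linux/ftrace.h>. Below is a hedged sketch of how such a caller uses the hooks; struct power_trace, POWER_CSTATE and the trace_power_* signatures are assumptions based on this kernel generation, not part of the diff:

	#include <trace/power.h>

	/*
	 * Illustrative only: record entry and exit of an idle C-state the
	 * way the x86 idle paths of this era did (names assumed, see above).
	 */
	static void example_enter_cstate(unsigned int cstate)
	{
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, cstate);
		/* ... architecture-specific idle instruction would go here ... */
		trace_power_end(&it);
	}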