Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |   4
-rw-r--r--  arch/x86/kernel/cpu/intel.c                |   8
-rw-r--r--  arch/x86/kernel/dumpstack.c                |   7
-rw-r--r--  arch/x86/kernel/ftrace.c                   | 143
-rw-r--r--  arch/x86/kernel/process.c                  |   5
5 files changed, 51 insertions(+), 116 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 22590cf688ae..5e40f54171e7 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+DEFINE_TRACE(power_mark);
+
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 25c559ba8d54..1a89a2b68d15 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
@@ -55,11 +56,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 
 	/*
 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-	 * with P/T states and does not stop in deep C-states
+	 * with P/T states and does not stop in deep C-states.
+	 *
+	 * It is also reliable across cores and sockets. (but not across
+	 * cabinets - we turn it off in that case explicitly.)
 	 */
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+		sched_clock_stable = 1;
 	}
 
 	/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 87d103ded1c3..95ea5fa7d444 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -10,10 +10,12 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/ftrace.h>
 
 #include <asm/stacktrace.h>
 
@@ -195,6 +197,11 @@ unsigned __kprobes long oops_begin(void)
 	int cpu;
 	unsigned long flags;
 
+	/* notify the hw-branch tracer so it may disable tracing and
+	   add the last trace to the trace buffer -
+	   the earlier this happens, the more useful the trace. */
+	trace_hw_branch_oops();
+
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 231bdd3c5b1c..a85da1764b1c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <linux/ftrace.h>
 #include <asm/nops.h>
@@ -26,6 +27,18 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+int ftrace_arch_code_modify_prepare(void)
+{
+	set_kernel_text_rw();
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+	set_kernel_text_ro();
+	return 0;
+}
+
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
 	struct {
@@ -82,7 +95,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
 static int mod_code_write;		/* set when NMI should do the write */
 static void *mod_code_ip;		/* holds the IP to write to */
@@ -111,12 +124,16 @@ static void ftrace_mod_code(void)
 	 */
 	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
 					     MCOUNT_INSN_SIZE);
+
+	/* if we fail, then kill any new writers */
+	if (mod_code_status)
+		mod_code_write = 0;
 }
 
 void ftrace_nmi_enter(void)
 {
-	atomic_inc(&in_nmi);
-	/* Must have in_nmi seen before reading write flag */
+	atomic_inc(&nmi_running);
+	/* Must have nmi_running seen before reading write flag */
 	smp_mb();
 	if (mod_code_write) {
 		ftrace_mod_code();
@@ -126,22 +143,21 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-	/* Finish all executions before clearing in_nmi */
+	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
-	atomic_dec(&in_nmi);
+	atomic_dec(&nmi_running);
 }
 
 static void wait_for_nmi(void)
 {
-	int waited = 0;
+	if (!atomic_read(&nmi_running))
+		return;
 
-	while (atomic_read(&in_nmi)) {
-		waited = 1;
+	do {
 		cpu_relax();
-	}
+	} while (atomic_read(&nmi_running));
 
-	if (waited)
-		nmi_wait_count++;
+	nmi_wait_count++;
 }
 
 static int
@@ -368,100 +384,8 @@ int ftrace_disable_ftrace_graph_caller(void)
 	return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-	atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-	atomic_dec(&in_nmi);
-}
-
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -476,7 +400,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (unlikely(atomic_read(&in_nmi)))
+	if (unlikely(in_nmi()))
 		return;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -512,16 +436,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	if (unlikely(!__kernel_text_address(old))) {
-		ftrace_graph_stop();
-		*parent = old;
-		WARN_ON(1);
-		return;
-	}
-
-	calltime = cpu_clock(raw_smp_processor_id());
+	calltime = trace_clock_local();
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6afa5232dbb7..8c037051b353 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -8,7 +8,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/idle.h>
@@ -22,6 +22,9 @@ EXPORT_SYMBOL(idle_nomwait);
 
 struct kmem_cache *task_xstate_cachep;
 
+DEFINE_TRACE(power_start);
+DEFINE_TRACE(power_end);
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;