aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-31 16:34:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-31 16:34:04 -0400
commitf187e9fd68577cdd5f914659b6f7f11124e40485 (patch)
treead0e7422359724f7c7ffaaaa80dd3a7281f02a83 /kernel
parentadb3b1f3fc1c6edb501808ebf80a81e81c52eb73 (diff)
parent8ebfdf2babcda5a3b06cc67523bca1f9aed46009 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates and fixes from Ingo Molnar: "It's mostly fixes, but there's also two late items: - preliminary GTK GUI support for perf report - PMU raw event format descriptors in sysfs, to be parsed by tooling The raw event format in sysfs is a new ABI. For example for the 'CPU' PMU we have: aldebaran:~> ll /sys/bus/event_source/devices/cpu/format/* -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/any -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/cmask -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/edge -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/event -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/inv -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/offcore_rsp -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/pc -r--r--r--. 1 root root 4096 Mar 31 10:29 /sys/bus/event_source/devices/cpu/format/umask those lists of fields contain a specific format: aldebaran:~> cat /sys/bus/event_source/devices/cpu/format/offcore_rsp config1:0-63 So, those who wish to specify raw events can now use the following event format: -e cpu/cmask=1,event=2,umask=3 Most people will not want to specify any events (let alone raw events), they'll just use whatever default event the tools use. But for more obscure PMU events that have no cross-architecture generic events the above syntax is more usable and a bit more structured than specifying hex numbers." 
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits) perf tools: Remove auto-generated bison/flex files perf annotate: Fix off by one symbol hist size allocation and hit accounting perf tools: Add missing ref-cycles event back to event parser perf annotate: addr2line wants addresses in same format as objdump perf probe: Finder fails to resolve function name to address tracing: Fix ent_size in trace output perf symbols: Handle NULL dso in dso__name_len perf symbols: Do not include libgen.h perf tools: Fix bug in raw sample parsing perf tools: Fix display of first level of callchains perf tools: Switch module.h into export.h perf: Move mmap page data_head offset assertion out of header perf: Fix mmap_page capabilities and docs perf diff: Fix to work with new hists design perf tools: Fix modifier to be applied on correct events perf tools: Fix various casting issues for 32 bits perf tools: Simplify event_read_id exit path tracing: Fix ftrace stack trace entries tracing: Move the tracing_on/off() declarations into CONFIG_TRACING perf report: Add a simple GTK2-based 'perf report' browser ...
Diffstat (limited to 'kernel')
-rw-r--r--kernel/events/core.c11
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c3
-rw-r--r--kernel/trace/ring_buffer.c157
-rw-r--r--kernel/trace/trace.c113
-rw-r--r--kernel/trace/trace.h3
-rw-r--r--kernel/trace/trace_entries.h16
-rw-r--r--kernel/trace/trace_export.c2
8 files changed, 199 insertions, 108 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b50357914fb..a6a9ec4cd8f5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3348,7 +3348,7 @@ static void calc_timer_values(struct perf_event *event,
3348 *running = ctx_time - event->tstamp_running; 3348 *running = ctx_time - event->tstamp_running;
3349} 3349}
3350 3350
3351void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) 3351void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3352{ 3352{
3353} 3353}
3354 3354
@@ -3398,7 +3398,7 @@ void perf_event_update_userpage(struct perf_event *event)
3398 userpg->time_running = running + 3398 userpg->time_running = running +
3399 atomic64_read(&event->child_total_time_running); 3399 atomic64_read(&event->child_total_time_running);
3400 3400
3401 perf_update_user_clock(userpg, now); 3401 arch_perf_update_userpage(userpg, now);
3402 3402
3403 barrier(); 3403 barrier();
3404 ++userpg->lock; 3404 ++userpg->lock;
@@ -7116,6 +7116,13 @@ void __init perf_event_init(void)
7116 7116
7117 /* do not patch jump label more than once per second */ 7117 /* do not patch jump label more than once per second */
7118 jump_label_rate_limit(&perf_sched_events, HZ); 7118 jump_label_rate_limit(&perf_sched_events, HZ);
7119
7120 /*
7121 * Build time assertion that we keep the data_head at the intended
7122 * location. IOW, validation we got the __reserved[] size right.
7123 */
7124 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7125 != 1024);
7119} 7126}
7120 7127
7121static int __init perf_event_sysfs_init(void) 7128static int __init perf_event_sysfs_init(void)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cd3134510f3d..a1d2849f2473 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@ if FTRACE
141config FUNCTION_TRACER 141config FUNCTION_TRACER
142 bool "Kernel Function Tracer" 142 bool "Kernel Function Tracer"
143 depends on HAVE_FUNCTION_TRACER 143 depends on HAVE_FUNCTION_TRACER
144 select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE 144 select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
145 select KALLSYMS 145 select KALLSYMS
146 select GENERIC_TRACER 146 select GENERIC_TRACER
147 select CONTEXT_SWITCH_TRACER 147 select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 867bd1dd2dd0..0fa92f677c92 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -249,7 +249,8 @@ static void update_ftrace_function(void)
249#else 249#else
250 __ftrace_trace_function = func; 250 __ftrace_trace_function = func;
251#endif 251#endif
252 ftrace_trace_function = ftrace_test_stop_func; 252 ftrace_trace_function =
253 (func == ftrace_stub) ? func : ftrace_test_stop_func;
253#endif 254#endif
254} 255}
255 256
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f5b7b5c1195b..cf8d11e91efd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,33 +154,10 @@ enum {
154 154
155static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 155static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
156 156
157#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 157/* Used for individual buffers (after the counter) */
158 158#define RB_BUFFER_OFF (1 << 20)
159/**
160 * tracing_on - enable all tracing buffers
161 *
162 * This function enables all tracing buffers that may have been
163 * disabled with tracing_off.
164 */
165void tracing_on(void)
166{
167 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
168}
169EXPORT_SYMBOL_GPL(tracing_on);
170 159
171/** 160#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
172 * tracing_off - turn off all tracing buffers
173 *
174 * This function stops all tracing buffers from recording data.
175 * It does not disable any overhead the tracers themselves may
176 * be causing. This function simply causes all recording to
177 * the ring buffers to fail.
178 */
179void tracing_off(void)
180{
181 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
182}
183EXPORT_SYMBOL_GPL(tracing_off);
184 161
185/** 162/**
186 * tracing_off_permanent - permanently disable ring buffers 163 * tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
193 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 170 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
194} 171}
195 172
196/**
197 * tracing_is_on - show state of ring buffers enabled
198 */
199int tracing_is_on(void)
200{
201 return ring_buffer_flags == RB_BUFFERS_ON;
202}
203EXPORT_SYMBOL_GPL(tracing_is_on);
204
205#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 173#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206#define RB_ALIGNMENT 4U 174#define RB_ALIGNMENT 4U
207#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 175#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2619,6 +2587,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
2619EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 2587EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2620 2588
2621/** 2589/**
2590 * ring_buffer_record_off - stop all writes into the buffer
2591 * @buffer: The ring buffer to stop writes to.
2592 *
2593 * This prevents all writes to the buffer. Any attempt to write
2594 * to the buffer after this will fail and return NULL.
2595 *
2596 * This is different than ring_buffer_record_disable() as
2597 * it works like an on/off switch, where as the disable() verison
2598 * must be paired with a enable().
2599 */
2600void ring_buffer_record_off(struct ring_buffer *buffer)
2601{
2602 unsigned int rd;
2603 unsigned int new_rd;
2604
2605 do {
2606 rd = atomic_read(&buffer->record_disabled);
2607 new_rd = rd | RB_BUFFER_OFF;
2608 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2609}
2610EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2611
2612/**
2613 * ring_buffer_record_on - restart writes into the buffer
2614 * @buffer: The ring buffer to start writes to.
2615 *
2616 * This enables all writes to the buffer that was disabled by
2617 * ring_buffer_record_off().
2618 *
 2619 * This is different than ring_buffer_record_enable() as
 2620 * it works like an on/off switch, whereas the enable() version
2621 * must be paired with a disable().
2622 */
2623void ring_buffer_record_on(struct ring_buffer *buffer)
2624{
2625 unsigned int rd;
2626 unsigned int new_rd;
2627
2628 do {
2629 rd = atomic_read(&buffer->record_disabled);
2630 new_rd = rd & ~RB_BUFFER_OFF;
2631 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2632}
2633EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2634
2635/**
2636 * ring_buffer_record_is_on - return true if the ring buffer can write
2637 * @buffer: The ring buffer to see if write is enabled
2638 *
2639 * Returns true if the ring buffer is in a state that it accepts writes.
2640 */
2641int ring_buffer_record_is_on(struct ring_buffer *buffer)
2642{
2643 return !atomic_read(&buffer->record_disabled);
2644}
2645
2646/**
2622 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 2647 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2623 * @buffer: The ring buffer to stop writes to. 2648 * @buffer: The ring buffer to stop writes to.
2624 * @cpu: The CPU buffer to stop 2649 * @cpu: The CPU buffer to stop
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
4039} 4064}
4040EXPORT_SYMBOL_GPL(ring_buffer_read_page); 4065EXPORT_SYMBOL_GPL(ring_buffer_read_page);
4041 4066
4042#ifdef CONFIG_TRACING
4043static ssize_t
4044rb_simple_read(struct file *filp, char __user *ubuf,
4045 size_t cnt, loff_t *ppos)
4046{
4047 unsigned long *p = filp->private_data;
4048 char buf[64];
4049 int r;
4050
4051 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
4052 r = sprintf(buf, "permanently disabled\n");
4053 else
4054 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
4055
4056 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4057}
4058
4059static ssize_t
4060rb_simple_write(struct file *filp, const char __user *ubuf,
4061 size_t cnt, loff_t *ppos)
4062{
4063 unsigned long *p = filp->private_data;
4064 unsigned long val;
4065 int ret;
4066
4067 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4068 if (ret)
4069 return ret;
4070
4071 if (val)
4072 set_bit(RB_BUFFERS_ON_BIT, p);
4073 else
4074 clear_bit(RB_BUFFERS_ON_BIT, p);
4075
4076 (*ppos)++;
4077
4078 return cnt;
4079}
4080
4081static const struct file_operations rb_simple_fops = {
4082 .open = tracing_open_generic,
4083 .read = rb_simple_read,
4084 .write = rb_simple_write,
4085 .llseek = default_llseek,
4086};
4087
4088
4089static __init int rb_init_debugfs(void)
4090{
4091 struct dentry *d_tracer;
4092
4093 d_tracer = tracing_init_dentry();
4094
4095 trace_create_file("tracing_on", 0644, d_tracer,
4096 &ring_buffer_flags, &rb_simple_fops);
4097
4098 return 0;
4099}
4100
4101fs_initcall(rb_init_debugfs);
4102#endif
4103
4104#ifdef CONFIG_HOTPLUG_CPU 4067#ifdef CONFIG_HOTPLUG_CPU
4105static int rb_cpu_notify(struct notifier_block *self, 4068static int rb_cpu_notify(struct notifier_block *self,
4106 unsigned long action, void *hcpu) 4069 unsigned long action, void *hcpu)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10d5503f0d04..ed7b5d1e12f4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -36,6 +36,7 @@
36#include <linux/ctype.h> 36#include <linux/ctype.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/nmi.h>
39#include <linux/fs.h> 40#include <linux/fs.h>
40 41
41#include "trace.h" 42#include "trace.h"
@@ -352,6 +353,59 @@ static void wakeup_work_handler(struct work_struct *work)
352static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); 353static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
353 354
354/** 355/**
356 * tracing_on - enable tracing buffers
357 *
358 * This function enables tracing buffers that may have been
359 * disabled with tracing_off.
360 */
361void tracing_on(void)
362{
363 if (global_trace.buffer)
364 ring_buffer_record_on(global_trace.buffer);
365 /*
366 * This flag is only looked at when buffers haven't been
367 * allocated yet. We don't really care about the race
368 * between setting this flag and actually turning
369 * on the buffer.
370 */
371 global_trace.buffer_disabled = 0;
372}
373EXPORT_SYMBOL_GPL(tracing_on);
374
375/**
376 * tracing_off - turn off tracing buffers
377 *
378 * This function stops the tracing buffers from recording data.
379 * It does not disable any overhead the tracers themselves may
380 * be causing. This function simply causes all recording to
381 * the ring buffers to fail.
382 */
383void tracing_off(void)
 384{
 385 if (global_trace.buffer)
 386 ring_buffer_record_off(global_trace.buffer);
387 /*
388 * This flag is only looked at when buffers haven't been
389 * allocated yet. We don't really care about the race
390 * between setting this flag and actually turning
391 * on the buffer.
392 */
393 global_trace.buffer_disabled = 1;
394}
395EXPORT_SYMBOL_GPL(tracing_off);
396
397/**
398 * tracing_is_on - show state of ring buffers enabled
399 */
400int tracing_is_on(void)
401{
402 if (global_trace.buffer)
403 return ring_buffer_record_is_on(global_trace.buffer);
404 return !global_trace.buffer_disabled;
405}
406EXPORT_SYMBOL_GPL(tracing_is_on);
407
408/**
355 * trace_wake_up - wake up tasks waiting for trace input 409 * trace_wake_up - wake up tasks waiting for trace input
356 * 410 *
357 * Schedules a delayed work to wake up any task that is blocked on the 411 * Schedules a delayed work to wake up any task that is blocked on the
@@ -1644,6 +1698,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1644 int cpu_file = iter->cpu_file; 1698 int cpu_file = iter->cpu_file;
1645 u64 next_ts = 0, ts; 1699 u64 next_ts = 0, ts;
1646 int next_cpu = -1; 1700 int next_cpu = -1;
1701 int next_size = 0;
1647 int cpu; 1702 int cpu;
1648 1703
1649 /* 1704 /*
@@ -1675,9 +1730,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1675 next_cpu = cpu; 1730 next_cpu = cpu;
1676 next_ts = ts; 1731 next_ts = ts;
1677 next_lost = lost_events; 1732 next_lost = lost_events;
1733 next_size = iter->ent_size;
1678 } 1734 }
1679 } 1735 }
1680 1736
1737 iter->ent_size = next_size;
1738
1681 if (ent_cpu) 1739 if (ent_cpu)
1682 *ent_cpu = next_cpu; 1740 *ent_cpu = next_cpu;
1683 1741
@@ -4567,6 +4625,55 @@ static __init void create_trace_options_dir(void)
4567 create_trace_option_core_file(trace_options[i], i); 4625 create_trace_option_core_file(trace_options[i], i);
4568} 4626}
4569 4627
4628static ssize_t
4629rb_simple_read(struct file *filp, char __user *ubuf,
4630 size_t cnt, loff_t *ppos)
4631{
4632 struct ring_buffer *buffer = filp->private_data;
4633 char buf[64];
4634 int r;
4635
4636 if (buffer)
4637 r = ring_buffer_record_is_on(buffer);
4638 else
4639 r = 0;
4640
4641 r = sprintf(buf, "%d\n", r);
4642
4643 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4644}
4645
4646static ssize_t
4647rb_simple_write(struct file *filp, const char __user *ubuf,
4648 size_t cnt, loff_t *ppos)
4649{
4650 struct ring_buffer *buffer = filp->private_data;
4651 unsigned long val;
4652 int ret;
4653
4654 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4655 if (ret)
4656 return ret;
4657
4658 if (buffer) {
4659 if (val)
4660 ring_buffer_record_on(buffer);
4661 else
4662 ring_buffer_record_off(buffer);
4663 }
4664
4665 (*ppos)++;
4666
4667 return cnt;
4668}
4669
4670static const struct file_operations rb_simple_fops = {
4671 .open = tracing_open_generic,
4672 .read = rb_simple_read,
4673 .write = rb_simple_write,
4674 .llseek = default_llseek,
4675};
4676
4570static __init int tracer_init_debugfs(void) 4677static __init int tracer_init_debugfs(void)
4571{ 4678{
4572 struct dentry *d_tracer; 4679 struct dentry *d_tracer;
@@ -4626,6 +4733,9 @@ static __init int tracer_init_debugfs(void)
4626 trace_create_file("trace_clock", 0644, d_tracer, NULL, 4733 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4627 &trace_clock_fops); 4734 &trace_clock_fops);
4628 4735
4736 trace_create_file("tracing_on", 0644, d_tracer,
4737 global_trace.buffer, &rb_simple_fops);
4738
4629#ifdef CONFIG_DYNAMIC_FTRACE 4739#ifdef CONFIG_DYNAMIC_FTRACE
4630 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 4740 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4631 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 4741 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4798,6 +4908,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4798 if (ret != TRACE_TYPE_NO_CONSUME) 4908 if (ret != TRACE_TYPE_NO_CONSUME)
4799 trace_consume(&iter); 4909 trace_consume(&iter);
4800 } 4910 }
4911 touch_nmi_watchdog();
4801 4912
4802 trace_printk_seq(&iter.seq); 4913 trace_printk_seq(&iter.seq);
4803 } 4914 }
@@ -4863,6 +4974,8 @@ __init static int tracer_alloc_buffers(void)
4863 goto out_free_cpumask; 4974 goto out_free_cpumask;
4864 } 4975 }
4865 global_trace.entries = ring_buffer_size(global_trace.buffer); 4976 global_trace.entries = ring_buffer_size(global_trace.buffer);
4977 if (global_trace.buffer_disabled)
4978 tracing_off();
4866 4979
4867 4980
4868#ifdef CONFIG_TRACER_MAX_TRACE 4981#ifdef CONFIG_TRACER_MAX_TRACE
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54faec790bc1..95059f091a24 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -154,6 +154,7 @@ struct trace_array {
154 struct ring_buffer *buffer; 154 struct ring_buffer *buffer;
155 unsigned long entries; 155 unsigned long entries;
156 int cpu; 156 int cpu;
157 int buffer_disabled;
157 cycle_t time_start; 158 cycle_t time_start;
158 struct task_struct *waiter; 159 struct task_struct *waiter;
159 struct trace_array_cpu *data[NR_CPUS]; 160 struct trace_array_cpu *data[NR_CPUS];
@@ -835,13 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
835 filter) 836 filter)
836#include "trace_entries.h" 837#include "trace_entries.h"
837 838
838#ifdef CONFIG_PERF_EVENTS
839#ifdef CONFIG_FUNCTION_TRACER 839#ifdef CONFIG_FUNCTION_TRACER
840int perf_ftrace_event_register(struct ftrace_event_call *call, 840int perf_ftrace_event_register(struct ftrace_event_call *call,
841 enum trace_reg type, void *data); 841 enum trace_reg type, void *data);
842#else 842#else
843#define perf_ftrace_event_register NULL 843#define perf_ftrace_event_register NULL
844#endif /* CONFIG_FUNCTION_TRACER */ 844#endif /* CONFIG_FUNCTION_TRACER */
845#endif /* CONFIG_PERF_EVENTS */
846 845
847#endif /* _LINUX_KERNEL_TRACE_H */ 846#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index d91eb0541b3a..4108e1250ca2 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -166,6 +166,12 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
166 166
167#define FTRACE_STACK_ENTRIES 8 167#define FTRACE_STACK_ENTRIES 8
168 168
169#ifndef CONFIG_64BIT
170# define IP_FMT "%08lx"
171#else
172# define IP_FMT "%016lx"
173#endif
174
169FTRACE_ENTRY(kernel_stack, stack_entry, 175FTRACE_ENTRY(kernel_stack, stack_entry,
170 176
171 TRACE_STACK, 177 TRACE_STACK,
@@ -175,8 +181,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
175 __dynamic_array(unsigned long, caller ) 181 __dynamic_array(unsigned long, caller )
176 ), 182 ),
177 183
178 F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" 184 F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
179 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", 185 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
186 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
180 __entry->caller[0], __entry->caller[1], __entry->caller[2], 187 __entry->caller[0], __entry->caller[1], __entry->caller[2],
181 __entry->caller[3], __entry->caller[4], __entry->caller[5], 188 __entry->caller[3], __entry->caller[4], __entry->caller[5],
182 __entry->caller[6], __entry->caller[7]), 189 __entry->caller[6], __entry->caller[7]),
@@ -193,8 +200,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
193 __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) 200 __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
194 ), 201 ),
195 202
196 F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" 203 F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
197 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", 204 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
205 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
198 __entry->caller[0], __entry->caller[1], __entry->caller[2], 206 __entry->caller[0], __entry->caller[1], __entry->caller[2],
199 __entry->caller[3], __entry->caller[4], __entry->caller[5], 207 __entry->caller[3], __entry->caller[4], __entry->caller[5],
200 __entry->caller[6], __entry->caller[7]), 208 __entry->caller[6], __entry->caller[7]),
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7b46c9bd22ae..3dd15e8bc856 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -162,7 +162,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
162#define __dynamic_array(type, item) 162#define __dynamic_array(type, item)
163 163
164#undef F_printk 164#undef F_printk
165#define F_printk(fmt, args...) #fmt ", " __stringify(args) 165#define F_printk(fmt, args...) __stringify(fmt) ", " __stringify(args)
166 166
167#undef FTRACE_ENTRY_REG 167#undef FTRACE_ENTRY_REG
168#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ 168#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\