author    Ingo Molnar <mingo@elte.hu>  2010-07-23 03:10:29 -0400
committer Ingo Molnar <mingo@elte.hu>  2010-07-23 03:10:29 -0400
commit    3a01736e70a7d629140695ba46a901266b4460cc
tree      49ff8ce1e7c6a267f0ce84b5daddbe6666bc4253
parent    4c21adf26f8fcf86a755b9b9f55c2e9fd241e1fb
parent    24a461d537f49f9da6533d83100999ea08c6c755

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
-rw-r--r--  Documentation/trace/ftrace-design.txt   153
-rw-r--r--  include/linux/ftrace.h                     5
-rw-r--r--  include/linux/ftrace_event.h              12
-rw-r--r--  include/trace/ftrace.h                    12
-rw-r--r--  kernel/trace/ring_buffer.c                 2
-rw-r--r--  kernel/trace/trace.c                      46
-rw-r--r--  kernel/trace/trace.h                       4
-rw-r--r--  kernel/trace/trace_events.c               30
-rw-r--r--  kernel/trace/trace_irqsoff.c               3
-rw-r--r--  kernel/trace/trace_output.c                3
-rw-r--r--  kernel/trace/trace_sched_wakeup.c          2
-rwxr-xr-x  scripts/recordmcount.pl                    2
12 files changed, 241 insertions(+), 33 deletions(-)
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index f1f81afee8a0..dc52bd442c92 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -13,6 +13,9 @@ Note that this focuses on architecture implementation details only. If you
 want more explanation of a feature in terms of common code, review the common
 ftrace.txt file.
 
+Ideally, everyone who wishes to retain performance while supporting tracing in
+their kernel should make it all the way to dynamic ftrace support.
+
 
 Prerequisites
 -------------
@@ -215,7 +218,7 @@ An arch may pass in a unique value (frame pointer) to both the entering and
 exiting of a function. On exit, the value is compared and if it does not
 match, then it will panic the kernel. This is largely a sanity check for bad
 code generation with gcc. If gcc for your port sanely updates the frame
-pointer under different opitmization levels, then ignore this option.
+pointer under different optimization levels, then ignore this option.
 
 However, adding support for it isn't terribly difficult. In your assembly code
 that calls prepare_ftrace_return(), pass the frame pointer as the 3rd argument.
@@ -234,7 +237,7 @@ If you can't trace NMI functions, then skip this option.
 
 
 HAVE_SYSCALL_TRACEPOINTS
----------------------
+------------------------
 
 You need very few things to get the syscalls tracing in an arch.
 
@@ -250,12 +253,152 @@ You need very few things to get the syscalls tracing in an arch.
 HAVE_FTRACE_MCOUNT_RECORD
 -------------------------
 
-See scripts/recordmcount.pl for more info.
+See scripts/recordmcount.pl for more info. Just fill in the arch-specific
+details for how to locate the addresses of mcount call sites via objdump.
+This option doesn't make much sense without also implementing dynamic ftrace.
 
+
+HAVE_DYNAMIC_FTRACE
+-------------------
+
+You will first need HAVE_FTRACE_MCOUNT_RECORD and HAVE_FUNCTION_TRACER, so
+scroll your reader back up if you got over eager.
+
+Once those are out of the way, you will need to implement:
+ - asm/ftrace.h:
+   - MCOUNT_ADDR
+   - ftrace_call_adjust()
+   - struct dyn_arch_ftrace{}
+ - asm code:
+   - mcount() (new stub)
+   - ftrace_caller()
+   - ftrace_call()
+   - ftrace_stub()
+ - C code:
+   - ftrace_dyn_arch_init()
+   - ftrace_make_nop()
+   - ftrace_make_call()
+   - ftrace_update_ftrace_func()
+
+First you will need to fill out some arch details in your asm/ftrace.h.
+
+Define MCOUNT_ADDR as the address of your mcount symbol similar to:
+	#define MCOUNT_ADDR ((unsigned long)mcount)
+Since no one else will have a decl for that function, you will need to:
+	extern void mcount(void);
+
+You will also need the helper function ftrace_call_adjust(). Most people
+will be able to stub it out like so:
+	static inline unsigned long ftrace_call_adjust(unsigned long addr)
+	{
+		return addr;
+	}
 <details to be filled>
 
+Lastly you will need the custom dyn_arch_ftrace structure. If you need
+some extra state when runtime patching arbitrary call sites, this is the
+place. For now though, create an empty struct:
+	struct dyn_arch_ftrace {
+		/* No extra data needed */
+	};
+
+With the header out of the way, we can fill out the assembly code. While we
+did already create a mcount() function earlier, dynamic ftrace only wants a
+stub function. This is because the mcount() will only be used during boot
+and then all references to it will be patched out never to return. Instead,
+the guts of the old mcount() will be used to create a new ftrace_caller()
+function. Because the two are hard to merge, it will most likely be a lot
+easier to have two separate definitions split up by #ifdefs. Same goes for
+the ftrace_stub() as that will now be inlined in ftrace_caller().
+
+Before we get confused anymore, let's check out some pseudo code so you can
+implement your own stuff in assembly:
 
-HAVE_DYNAMIC_FTRACE
----------------------
+void mcount(void)
+{
+	return;
+}
+
+void ftrace_caller(void)
+{
+	/* implement HAVE_FUNCTION_TRACE_MCOUNT_TEST if you desire */
+
+	/* save all state needed by the ABI (see paragraph above) */
+
+	unsigned long frompc = ...;
+	unsigned long selfpc = <return address> - MCOUNT_INSN_SIZE;
+
+ftrace_call:
+	ftrace_stub(frompc, selfpc);
+
+	/* restore all state needed by the ABI */
+
+ftrace_stub:
+	return;
+}
+
+This might look a little odd at first, but keep in mind that we will be runtime
+patching multiple things. First, only functions that we actually want to trace
+will be patched to call ftrace_caller(). Second, since we only have one tracer
+active at a time, we will patch the ftrace_caller() function itself to call the
+specific tracer in question. That is the point of the ftrace_call label.
+
+With that in mind, let's move on to the C code that will actually be doing the
+runtime patching. You'll need a little knowledge of your arch's opcodes in
+order to make it through the next section.
+
+Every arch has an init callback function. If you need to do something early on
+to initialize some state, this is the time to do that. Otherwise, this simple
+function below should be sufficient for most people:
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	/* return value is done indirectly via data */
+	*(unsigned long *)data = 0;
+
+	return 0;
+}
+
+There are two functions that are used to do runtime patching of arbitrary
+functions. The first is used to turn the mcount call site into a nop (which
+is what helps us retain runtime performance when not tracing). The second is
+used to turn the mcount call site into a call to an arbitrary location (but
+typically that is ftrace_caller()). See the general function definition in
+linux/ftrace.h for the functions:
+	ftrace_make_nop()
+	ftrace_make_call()
+The rec->ip value is the address of the mcount call site that was collected
+by the scripts/recordmcount.pl during build time.
+
+The last function is used to do runtime patching of the active tracer. This
+will be modifying the assembly code at the location of the ftrace_call symbol
+inside of the ftrace_caller() function. So you should have sufficient padding
+at that location to support the new function calls you'll be inserting. Some
+people will be using a "call" type instruction while others will be using a
+"branch" type instruction. Specifically, the function is:
+	ftrace_update_ftrace_func()
+
+
+HAVE_DYNAMIC_FTRACE + HAVE_FUNCTION_GRAPH_TRACER
+------------------------------------------------
+
+The function grapher needs a few tweaks in order to work with dynamic ftrace.
+Basically, you will need to:
+ - update:
+   - ftrace_caller()
+   - ftrace_graph_call()
+   - ftrace_graph_caller()
+ - implement:
+   - ftrace_enable_ftrace_graph_caller()
+   - ftrace_disable_ftrace_graph_caller()
 
 <details to be filled>
+Quick notes:
+ - add a nop stub after the ftrace_call location named ftrace_graph_call;
+   stub needs to be large enough to support a call to ftrace_graph_caller()
+ - update ftrace_graph_caller() to work with being called by the new
+   ftrace_caller() since some semantics may have changed
+ - ftrace_enable_ftrace_graph_caller() will runtime patch the
+   ftrace_graph_call location with a call to ftrace_graph_caller()
+ - ftrace_disable_ftrace_graph_caller() will runtime patch the
+   ftrace_graph_call location with nops
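
The new HAVE_DYNAMIC_FTRACE section above names ftrace_make_nop(), ftrace_make_call() and ftrace_update_ftrace_func() but leaves their bodies as an exercise. As a rough illustration of the pattern most ports follow, here is a minimal sketch for a hypothetical fixed-width-instruction architecture; arch_nop_insn(), arch_call_insn(), arch_text_poke() and INSN_SIZE are invented for this sketch rather than kernel API, while the exported prototypes are the ones declared in include/linux/ftrace.h.

/*
 * Illustrative sketch only -- not taken from any in-tree port.
 * The arch_*() helpers below stand in for whatever your architecture
 * really provides; only the exported function signatures come from
 * include/linux/ftrace.h.
 */
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define INSN_SIZE	4	/* assumes a fixed-width instruction set */

extern void ftrace_call(void);	/* label exported from ftrace_caller() */

/* hypothetical helpers a real port would implement */
u32 arch_nop_insn(void);
u32 arch_call_insn(unsigned long ip, unsigned long target);
int arch_text_poke(unsigned long ip, u32 insn);

static int ftrace_modify_code(unsigned long ip, u32 expect, u32 new)
{
	u32 cur;

	/* read the call site back and refuse to patch unexpected code */
	if (probe_kernel_read(&cur, (void *)ip, INSN_SIZE))
		return -EFAULT;
	if (cur != expect)
		return -EINVAL;

	return arch_text_poke(ip, new);
}

/* turn the mcount call site recorded in rec->ip into a nop */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	return ftrace_modify_code(rec->ip, arch_call_insn(rec->ip, addr),
				  arch_nop_insn());
}

/* turn the nop at rec->ip back into a call to addr (usually ftrace_caller) */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip, arch_nop_insn(),
				  arch_call_insn(rec->ip, addr));
}

/* repoint the call at the ftrace_call label inside ftrace_caller() */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)&ftrace_call;

	return arch_text_poke(ip, arch_call_insn(ip, (unsigned long)func));
}

The verify-before-patch step mirrors what real implementations do: if the bytes at rec->ip are not what ftrace expects, the site is left untouched and an error is returned instead of silently corrupting kernel text.
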
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 41e46330d9be..dcd6a7c3a435 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,3 +1,8 @@
+/*
+ * Ftrace header. For implementation details beyond the random comments
+ * scattered below, see: Documentation/trace/ftrace-design.txt
+ */
+
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 01df7ca4ead7..02b8b24f8f51 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -11,8 +11,6 @@ struct trace_array;
 struct tracer;
 struct dentry;
 
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
 struct trace_print_flags {
 	unsigned long mask;
 	const char *name;
@@ -58,6 +56,9 @@ struct trace_iterator {
 	struct ring_buffer_iter *buffer_iter[NR_CPUS];
 	unsigned long iter_flags;
 
+	/* trace_seq for __print_flags() and __print_symbolic() etc. */
+	struct trace_seq tmp_seq;
+
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq seq;
 	struct trace_entry *ent;
@@ -152,11 +153,13 @@ extern int ftrace_event_reg(struct ftrace_event_call *event,
 enum {
 	TRACE_EVENT_FL_ENABLED_BIT,
 	TRACE_EVENT_FL_FILTERED_BIT,
+	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 };
 
 enum {
 	TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
 	TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+	TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 };
 
 struct ftrace_event_call {
@@ -174,6 +177,7 @@ struct ftrace_event_call {
  * 32 bit flags:
  * bit 1: enabled
  * bit 2: filter_active
+ * bit 3: enabled cmd record
  *
  * Changes to flags must hold the event_mutex.
  *
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 55c1fd1bbc3d..fb783d94fc54 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -145,7 +145,7 @@
  * struct trace_seq *s = &iter->seq;
  * struct ftrace_raw_<call> *field; <-- defined in stage 1
  * struct trace_entry *entry;
- * struct trace_seq *p;
+ * struct trace_seq *p = &iter->tmp_seq;
  * int ret;
  *
  * entry = iter->ent;
@@ -157,12 +157,10 @@
  *
  * field = (typeof(field))entry;
  *
- * p = &get_cpu_var(ftrace_event_seq);
  * trace_seq_init(p);
  * ret = trace_seq_printf(s, "%s: ", <call>);
  * if (ret)
  * 	ret = trace_seq_printf(s, <TP_printk> "\n");
- * put_cpu();
  * if (!ret)
  * 	return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -216,7 +214,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	struct trace_seq *s = &iter->seq;	\
 	struct ftrace_raw_##call *field;	\
 	struct trace_entry *entry;	\
-	struct trace_seq *p;	\
+	struct trace_seq *p = &iter->tmp_seq;	\
 	int ret;	\
 	\
 	event = container_of(trace_event, struct ftrace_event_call,	\
@@ -231,12 +229,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	\
 	field = (typeof(field))entry;	\
 	\
-	p = &get_cpu_var(ftrace_event_seq);	\
 	trace_seq_init(p);	\
 	ret = trace_seq_printf(s, "%s: ", event->name);	\
 	if (ret)	\
 		ret = trace_seq_printf(s, print);	\
-	put_cpu();	\
 	if (!ret)	\
 		return TRACE_TYPE_PARTIAL_LINE;	\
 	\
@@ -255,7 +251,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	struct trace_seq *s = &iter->seq;	\
 	struct ftrace_raw_##template *field;	\
 	struct trace_entry *entry;	\
-	struct trace_seq *p;	\
+	struct trace_seq *p = &iter->tmp_seq;	\
 	int ret;	\
 	\
 	entry = iter->ent;	\
@@ -267,12 +263,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	\
 	field = (typeof(field))entry;	\
 	\
-	p = &get_cpu_var(ftrace_event_seq);	\
 	trace_seq_init(p);	\
 	ret = trace_seq_printf(s, "%s: ", #call);	\
 	if (ret)	\
 		ret = trace_seq_printf(s, print);	\
-	put_cpu();	\
 	if (!ret)	\
 		return TRACE_TYPE_PARTIAL_LINE;	\
 	\
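
The three generated output routines above all swap the per-CPU ftrace_event_seq for the per-iterator iter->tmp_seq, which is why the get_cpu_var()/put_cpu() pair disappears. That scratch trace_seq (the local p) is what __print_flags() and __print_symbolic() format into. A hypothetical event header is sketched below to show where p ends up being used; the sample_io_request event, its fields and its flag names are made up for illustration and are not part of this patch.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

/*
 * Hypothetical event whose TP_printk() uses __print_flags(); in the
 * generated output function __print_flags() expands to
 * ftrace_print_flags_seq(p, ...), so the flag names are now formatted
 * into iter->tmp_seq rather than a per-CPU buffer.
 */
TRACE_EVENT(sample_io_request,

	TP_PROTO(unsigned long flags, unsigned int bytes),

	TP_ARGS(flags, bytes),

	TP_STRUCT__entry(
		__field(unsigned long, flags)
		__field(unsigned int, bytes)
	),

	TP_fast_assign(
		__entry->flags = flags;
		__entry->bytes = bytes;
	),

	TP_printk("flags=%s bytes=%u",
		  __print_flags(__entry->flags, "|",
				{ 0x1, "SYNC" }, { 0x2, "META" }),
		  __entry->bytes)
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
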
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 28d0615a513f..3632ce87674f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -443,6 +443,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
  */
 struct ring_buffer_per_cpu {
 	int cpu;
+	atomic_t record_disabled;
 	struct ring_buffer *buffer;
 	spinlock_t reader_lock; /* serialize readers */
 	arch_spinlock_t lock;
@@ -462,7 +463,6 @@ struct ring_buffer_per_cpu {
 	unsigned long read;
 	u64 write_stamp;
 	u64 read_stamp;
-	atomic_t record_disabled;
 };
 
 struct ring_buffer {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c1752dac613e..4b1122d0df37 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -428,6 +428,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"sleep-time",
 	"graph-time",
+	"record-cmd",
 	NULL
 };
 
@@ -659,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -685,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
@@ -729,7 +739,7 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	if (strlen(type->name) > MAX_TRACER_SIZE) {
+	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 		return -1;
 	}
@@ -2508,6 +2518,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 		trace_flags |= mask;
 	else
 		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_RECORD_CMD)
+		trace_event_enable_cmd_record(enabled);
 }
 
 static ssize_t
@@ -2746,6 +2759,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2773,11 +2789,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
@@ -2838,12 +2857,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer; instead, resize it because
+		 * the max_tr ring buffer has some state (e.g. ring->clock) and
+		 * we want to preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
@@ -3426,7 +3459,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -4531,16 +4563,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					   TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 638a5887e2ec..d05c873dd4b2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -274,6 +274,7 @@ struct tracer {
 	struct tracer *next;
 	int print_max;
 	struct tracer_flags *flags;
+	int use_max_tr;
 };
 
 
@@ -581,6 +582,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_LATENCY_FMT = 0x20000,
 	TRACE_ITER_SLEEP_TIME = 0x40000,
 	TRACE_ITER_GRAPH_TIME = 0x80000,
+	TRACE_ITER_RECORD_CMD = 0x100000,
 };
 
 /*
@@ -713,6 +715,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 	return 0;
 }
 
+extern void trace_event_enable_cmd_record(bool enable);
+
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e8e6043f4d29..09b4fa6e4d3b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -170,6 +170,26 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
 }
 EXPORT_SYMBOL_GPL(ftrace_event_reg);
 
+void trace_event_enable_cmd_record(bool enable)
+{
+	struct ftrace_event_call *call;
+
+	mutex_lock(&event_mutex);
+	list_for_each_entry(call, &ftrace_events, list) {
+		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
+			continue;
+
+		if (enable) {
+			tracing_start_cmdline_record();
+			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+		} else {
+			tracing_stop_cmdline_record();
+			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+		}
+	}
+	mutex_unlock(&event_mutex);
+}
+
 static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					int enable)
 {
@@ -179,13 +199,19 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 	case 0:
 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
 			call->flags &= ~TRACE_EVENT_FL_ENABLED;
-			tracing_stop_cmdline_record();
+			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
+				tracing_stop_cmdline_record();
+				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+			}
 			call->class->reg(call, TRACE_REG_UNREGISTER);
 		}
 		break;
 	case 1:
 		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
-			tracing_start_cmdline_record();
+			if (trace_flags & TRACE_ITER_RECORD_CMD) {
+				tracing_start_cmdline_record();
+				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+			}
 			ret = call->class->reg(call, TRACE_REG_REGISTER);
 			if (ret) {
 				tracing_stop_cmdline_record();
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 6fd486e0cef4..73a6b0601f2e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 	.open = irqsoff_trace_open,
 	.close = irqsoff_trace_close,
+	.use_max_tr = 1,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
@@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 #endif
 	.open = irqsoff_trace_open,
 	.close = irqsoff_trace_close,
+	.use_max_tr = 1,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 #endif
 	.open = irqsoff_trace_open,
 	.close = irqsoff_trace_close,
+	.use_max_tr = 1,
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a46197b80b7f..02272baa2206 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -16,9 +16,6 @@
 
 DECLARE_RWSEM(trace_event_mutex);
 
-DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
-EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
-
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index c9fd5bd02036..4086eae6e81b 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -382,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
+	.use_max_tr = 1,
 };
 
 static struct tracer wakeup_rt_tracer __read_mostly =
@@ -396,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
+	.use_max_tr = 1,
 };
 
 __init static int init_wakeup_tracer(void)
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index f3c9c0a90b98..0171060b5fd6 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -326,7 +326,7 @@ if ($arch eq "x86_64") {
 	# 14: R_MIPS_NONE *ABS*
 	# 18: 00020021 nop
 	if ($is_module eq "0") {
-		$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+		$mcount_regex = "^\\s*([0-9a-fA-F]+): R_MIPS_26\\s+_mcount\$";
 	} else {
 		$mcount_regex = "^\\s*([0-9a-fA-F]+): R_MIPS_HI16\\s+_mcount\$";
 	}