-rw-r--r--  Documentation/kernel-parameters.txt       7
-rw-r--r--  Documentation/trace/ftrace-design.txt    13
-rw-r--r--  arch/s390/kernel/ftrace.c                67
-rw-r--r--  arch/x86/kernel/entry_32.S                7
-rw-r--r--  arch/x86/kernel/entry_64.S                6
-rw-r--r--  arch/x86/kernel/ftrace.c                 84
-rw-r--r--  arch/x86/mm/testmmiotrace.c              29
-rw-r--r--  include/linux/smp_lock.h                 21
-rw-r--r--  include/trace/events/bkl.h               61
-rw-r--r--  include/trace/syscall.h                   2
-rw-r--r--  kernel/trace/ftrace.c                   370
-rw-r--r--  kernel/trace/trace.c                      4
-rw-r--r--  kernel/trace/trace.h                     44
-rw-r--r--  kernel/trace/trace_events.c              23
-rw-r--r--  kernel/trace/trace_events_filter.c      155
-rw-r--r--  kernel/trace/trace_syscalls.c            86
-rw-r--r--  lib/kernel_lock.c                        20
-rwxr-xr-x  scripts/recordmcount.pl                   1
18 files changed, 639 insertions, 361 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9107b387e91..c8d1b2be28e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -779,6 +779,13 @@ and is between 256 and 4096 characters. It is defined in the file
779 by the set_ftrace_notrace file in the debugfs 779 by the set_ftrace_notrace file in the debugfs
780 tracing directory. 780 tracing directory.
781 781
782 ftrace_graph_filter=[function-list]
783 [FTRACE] Limit the top-level callers traced by the
784 function graph tracer at boot up.
785 function-list is a comma-separated list of functions
786 that can be changed at run time by the
787 set_graph_function file in the debugfs tracing directory.
788
782 gamecon.map[2|3]= 789 gamecon.map[2|3]=
783 [HW,JOY] Multisystem joystick and NES/SNES/PSX pad 790 [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
784 support via parallel port (up to 5 devices per port) 791 support via parallel port (up to 5 devices per port)
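For illustration only (the function names are arbitrary), the new parameter is passed on the kernel command line much like the existing ftrace_filter= option:

	ftrace_graph_filter=kmem_cache_alloc,sys_nanosleep

The resulting list can then be inspected or changed at run time through the set_graph_function file in the debugfs tracing directory, as the text above notes.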
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index 7003e10f10f..641a1ef2a7f 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
213<details to be filled> 213<details to be filled>
214 214
215 215
216HAVE_FTRACE_SYSCALLS 216HAVE_SYSCALL_TRACEPOINTS
217--------------------- 217---------------------
218 218
219<details to be filled> 219You need only a few things to get syscall tracing working on an arch.
220
221- Have a NR_syscalls variable in <asm/unistd.h> that provides the number
222 of syscalls supported by the arch.
223- Implement arch_syscall_addr() that resolves a syscall address from a
224 syscall number.
225- Support the TIF_SYSCALL_TRACEPOINT thread flag.
226- Call the trace_sys_enter() and trace_sys_exit() tracepoints from the
227 ptrace syscall tracing path.
228- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
220 229
221 230
222HAVE_FTRACE_MCOUNT_RECORD 231HAVE_FTRACE_MCOUNT_RECORD
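As a minimal sketch of the arch_syscall_addr() requirement listed above, for a hypothetical arch whose sys_call_table is a plain array of addresses (the s390 and x86 hunks below show the real implementations):

	extern unsigned long sys_call_table[];	/* provided by the arch */

	unsigned long __init arch_syscall_addr(int nr)
	{
		return sys_call_table[nr];
	}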
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index f5fe34dd821..5a82bc68193 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,73 +203,10 @@ out:
203 203
204#ifdef CONFIG_FTRACE_SYSCALLS 204#ifdef CONFIG_FTRACE_SYSCALLS
205 205
206extern unsigned long __start_syscalls_metadata[];
207extern unsigned long __stop_syscalls_metadata[];
208extern unsigned int sys_call_table[]; 206extern unsigned int sys_call_table[];
209 207
210static struct syscall_metadata **syscalls_metadata; 208unsigned long __init arch_syscall_addr(int nr)
211
212struct syscall_metadata *syscall_nr_to_meta(int nr)
213{
214 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
215 return NULL;
216
217 return syscalls_metadata[nr];
218}
219
220int syscall_name_to_nr(char *name)
221{
222 int i;
223
224 if (!syscalls_metadata)
225 return -1;
226 for (i = 0; i < NR_syscalls; i++)
227 if (syscalls_metadata[i])
228 if (!strcmp(syscalls_metadata[i]->name, name))
229 return i;
230 return -1;
231}
232
233void set_syscall_enter_id(int num, int id)
234{
235 syscalls_metadata[num]->enter_id = id;
236}
237
238void set_syscall_exit_id(int num, int id)
239{ 209{
240 syscalls_metadata[num]->exit_id = id; 210 return (unsigned long)sys_call_table[nr];
241}
242
243static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
244{
245 struct syscall_metadata *start;
246 struct syscall_metadata *stop;
247 char str[KSYM_SYMBOL_LEN];
248
249 start = (struct syscall_metadata *)__start_syscalls_metadata;
250 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
251 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
252
253 for ( ; start < stop; start++) {
254 if (start->name && !strcmp(start->name + 3, str + 3))
255 return start;
256 }
257 return NULL;
258}
259
260static int __init arch_init_ftrace_syscalls(void)
261{
262 struct syscall_metadata *meta;
263 int i;
264 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
265 GFP_KERNEL);
266 if (!syscalls_metadata)
267 return -ENOMEM;
268 for (i = 0; i < NR_syscalls; i++) {
269 meta = find_syscall_meta((unsigned long)sys_call_table[i]);
270 syscalls_metadata[i] = meta;
271 }
272 return 0;
273} 211}
274arch_initcall(arch_init_ftrace_syscalls);
275#endif 212#endif
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c097e7d607c..7d52e9da5e0 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1185,17 +1185,14 @@ END(ftrace_graph_caller)
1185 1185
1186.globl return_to_handler 1186.globl return_to_handler
1187return_to_handler: 1187return_to_handler:
1188 pushl $0
1189 pushl %eax 1188 pushl %eax
1190 pushl %ecx
1191 pushl %edx 1189 pushl %edx
1192 movl %ebp, %eax 1190 movl %ebp, %eax
1193 call ftrace_return_to_handler 1191 call ftrace_return_to_handler
1194 movl %eax, 0xc(%esp) 1192 movl %eax, %ecx
1195 popl %edx 1193 popl %edx
1196 popl %ecx
1197 popl %eax 1194 popl %eax
1198 ret 1195 jmp *%ecx
1199#endif 1196#endif
1200 1197
1201.section .rodata,"a" 1198.section .rodata,"a"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b5c061f8f35..bd5bbddddf9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -155,11 +155,11 @@ GLOBAL(return_to_handler)
155 155
156 call ftrace_return_to_handler 156 call ftrace_return_to_handler
157 157
158 movq %rax, 16(%rsp) 158 movq %rax, %rdi
159 movq 8(%rsp), %rdx 159 movq 8(%rsp), %rdx
160 movq (%rsp), %rax 160 movq (%rsp), %rax
161 addq $16, %rsp 161 addq $24, %rsp
162 retq 162 jmp *%rdi
163#endif 163#endif
164 164
165 165
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9dbb527e165..5a1b9758fd6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -9,6 +9,8 @@
9 * the dangers of modifying code on the run. 9 * the dangers of modifying code on the run.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/spinlock.h> 14#include <linux/spinlock.h>
13#include <linux/hardirq.h> 15#include <linux/hardirq.h>
14#include <linux/uaccess.h> 16#include <linux/uaccess.h>
@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data)
336 338
337 switch (faulted) { 339 switch (faulted) {
338 case 0: 340 case 0:
339 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); 341 pr_info("converting mcount calls to 0f 1f 44 00 00\n");
340 memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); 342 memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
341 break; 343 break;
342 case 1: 344 case 1:
343 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); 345 pr_info("converting mcount calls to 66 66 66 66 90\n");
344 memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); 346 memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
345 break; 347 break;
346 case 2: 348 case 2:
347 pr_info("ftrace: converting mcount calls to jmp . + 5\n"); 349 pr_info("converting mcount calls to jmp . + 5\n");
348 memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); 350 memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
349 break; 351 break;
350 } 352 }
@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
468 470
469#ifdef CONFIG_FTRACE_SYSCALLS 471#ifdef CONFIG_FTRACE_SYSCALLS
470 472
471extern unsigned long __start_syscalls_metadata[];
472extern unsigned long __stop_syscalls_metadata[];
473extern unsigned long *sys_call_table; 473extern unsigned long *sys_call_table;
474 474
475static struct syscall_metadata **syscalls_metadata; 475unsigned long __init arch_syscall_addr(int nr)
476
477static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
478{
479 struct syscall_metadata *start;
480 struct syscall_metadata *stop;
481 char str[KSYM_SYMBOL_LEN];
482
483
484 start = (struct syscall_metadata *)__start_syscalls_metadata;
485 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
486 kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
487
488 for ( ; start < stop; start++) {
489 if (start->name && !strcmp(start->name, str))
490 return start;
491 }
492 return NULL;
493}
494
495struct syscall_metadata *syscall_nr_to_meta(int nr)
496{
497 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
498 return NULL;
499
500 return syscalls_metadata[nr];
501}
502
503int syscall_name_to_nr(char *name)
504{ 476{
505 int i; 477 return (unsigned long)(&sys_call_table)[nr];
506
507 if (!syscalls_metadata)
508 return -1;
509
510 for (i = 0; i < NR_syscalls; i++) {
511 if (syscalls_metadata[i]) {
512 if (!strcmp(syscalls_metadata[i]->name, name))
513 return i;
514 }
515 }
516 return -1;
517}
518
519void set_syscall_enter_id(int num, int id)
520{
521 syscalls_metadata[num]->enter_id = id;
522}
523
524void set_syscall_exit_id(int num, int id)
525{
526 syscalls_metadata[num]->exit_id = id;
527}
528
529static int __init arch_init_ftrace_syscalls(void)
530{
531 int i;
532 struct syscall_metadata *meta;
533 unsigned long **psys_syscall_table = &sys_call_table;
534
535 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
536 NR_syscalls, GFP_KERNEL);
537 if (!syscalls_metadata) {
538 WARN_ON(1);
539 return -ENOMEM;
540 }
541
542 for (i = 0; i < NR_syscalls; i++) {
543 meta = find_syscall_meta(psys_syscall_table[i]);
544 syscalls_metadata[i] = meta;
545 }
546 return 0;
547} 478}
548arch_initcall(arch_init_ftrace_syscalls);
549#endif 479#endif
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 427fd1b56df..8565d944f7c 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -1,12 +1,13 @@
1/* 1/*
2 * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi> 2 * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
3 */ 3 */
4
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
4#include <linux/module.h> 7#include <linux/module.h>
5#include <linux/io.h> 8#include <linux/io.h>
6#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
7 10
8#define MODULE_NAME "testmmiotrace"
9
10static unsigned long mmio_address; 11static unsigned long mmio_address;
11module_param(mmio_address, ulong, 0); 12module_param(mmio_address, ulong, 0);
12MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " 13MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
@@ -30,7 +31,7 @@ static unsigned v32(unsigned i)
30static void do_write_test(void __iomem *p) 31static void do_write_test(void __iomem *p)
31{ 32{
32 unsigned int i; 33 unsigned int i;
33 pr_info(MODULE_NAME ": write test.\n"); 34 pr_info("write test.\n");
34 mmiotrace_printk("Write test.\n"); 35 mmiotrace_printk("Write test.\n");
35 36
36 for (i = 0; i < 256; i++) 37 for (i = 0; i < 256; i++)
@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p)
47{ 48{
48 unsigned int i; 49 unsigned int i;
49 unsigned errs[3] = { 0 }; 50 unsigned errs[3] = { 0 };
50 pr_info(MODULE_NAME ": read test.\n"); 51 pr_info("read test.\n");
51 mmiotrace_printk("Read test.\n"); 52 mmiotrace_printk("Read test.\n");
52 53
53 for (i = 0; i < 256; i++) 54 for (i = 0; i < 256; i++)
@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p)
68 69
69static void do_read_far_test(void __iomem *p) 70static void do_read_far_test(void __iomem *p)
70{ 71{
71 pr_info(MODULE_NAME ": read far test.\n"); 72 pr_info("read far test.\n");
72 mmiotrace_printk("Read far test.\n"); 73 mmiotrace_printk("Read far test.\n");
73 74
74 ioread32(p + read_far); 75 ioread32(p + read_far);
@@ -78,7 +79,7 @@ static void do_test(unsigned long size)
78{ 79{
79 void __iomem *p = ioremap_nocache(mmio_address, size); 80 void __iomem *p = ioremap_nocache(mmio_address, size);
80 if (!p) { 81 if (!p) {
81 pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); 82 pr_err("could not ioremap, aborting.\n");
82 return; 83 return;
83 } 84 }
84 mmiotrace_printk("ioremap returned %p.\n", p); 85 mmiotrace_printk("ioremap returned %p.\n", p);
@@ -94,24 +95,22 @@ static int __init init(void)
94 unsigned long size = (read_far) ? (8 << 20) : (16 << 10); 95 unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
95 96
96 if (mmio_address == 0) { 97 if (mmio_address == 0) {
97 pr_err(MODULE_NAME ": you have to use the module argument " 98 pr_err("you have to use the module argument mmio_address.\n");
98 "mmio_address.\n"); 99 pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n");
99 pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
100 " YOU REALLY KNOW WHAT YOU ARE DOING!\n");
101 return -ENXIO; 100 return -ENXIO;
102 } 101 }
103 102
104 pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI " 103 pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
105 "address space, and writing 16 kB of rubbish in there.\n", 104 "and writing 16 kB of rubbish in there.\n",
106 size >> 10, mmio_address); 105 size >> 10, mmio_address);
107 do_test(size); 106 do_test(size);
108 pr_info(MODULE_NAME ": All done.\n"); 107 pr_info("All done.\n");
109 return 0; 108 return 0;
110} 109}
111 110
112static void __exit cleanup(void) 111static void __exit cleanup(void)
113{ 112{
114 pr_debug(MODULE_NAME ": unloaded.\n"); 113 pr_debug("unloaded.\n");
115} 114}
116 115
117module_init(init); 116module_init(init);
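The testmmiotrace.c and ftrace.c hunks above rely on the kernel's pr_fmt() convention to prefix every message; a minimal sketch of the pattern (module and function names are invented for illustration):

	/*
	 * pr_fmt() must be defined before the first include, otherwise the
	 * default "#define pr_fmt(fmt) fmt" from the printk headers wins.
	 * KBUILD_MODNAME is supplied by kbuild for each object file.
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/module.h>
	#include <linux/kernel.h>

	static int __init pr_fmt_demo_init(void)
	{
		pr_info("loaded\n");	/* logged as "<module name>: loaded" */
		return 0;
	}

	static void __exit pr_fmt_demo_exit(void)
	{
		pr_info("unloaded\n");
	}

	module_init(pr_fmt_demo_init);
	module_exit(pr_fmt_demo_exit);
	MODULE_LICENSE("GPL");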
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 813be59bf34..2ea1dd1ba21 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
24 return 0; 24 return 0;
25} 25}
26 26
27extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); 27extern void __lockfunc
28extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); 28_lock_kernel(const char *func, const char *file, int line)
29__acquires(kernel_lock);
30
31extern void __lockfunc
32_unlock_kernel(const char *func, const char *file, int line)
33__releases(kernel_lock);
34
35#define lock_kernel() do { \
36 _lock_kernel(__func__, __FILE__, __LINE__); \
37} while (0)
38
39#define unlock_kernel() do { \
40 _unlock_kernel(__func__, __FILE__, __LINE__); \
41} while (0)
29 42
30/* 43/*
31 * Various legacy drivers don't really need the BKL in a specific 44 * Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)
41 54
42#else 55#else
43 56
44#define lock_kernel() do { } while(0) 57#define lock_kernel()
45#define unlock_kernel() do { } while(0) 58#define unlock_kernel()
46#define release_kernel_lock(task) do { } while(0) 59#define release_kernel_lock(task) do { } while(0)
47#define cycle_kernel_lock() do { } while(0) 60#define cycle_kernel_lock() do { } while(0)
48#define reacquire_kernel_lock(task) 0 61#define reacquire_kernel_lock(task) 0
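The wrappers above exist only to capture call-site information for the new BKL tracepoints; a call such as

	lock_kernel();

now expands to

	_lock_kernel(__func__, __FILE__, __LINE__);

so the function/file/line triple reaches trace_lock_kernel() (see the lib/kernel_lock.c hunks below) without having to touch every caller.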
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h
new file mode 100644
index 00000000000..8abd620a490
--- /dev/null
+++ b/include/trace/events/bkl.h
@@ -0,0 +1,61 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM bkl
3
4#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BKL_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(lock_kernel,
10
11 TP_PROTO(const char *func, const char *file, int line),
12
13 TP_ARGS(func, file, line),
14
15 TP_STRUCT__entry(
16 __field( int, lock_depth )
17 __field_ext( const char *, func, FILTER_PTR_STRING )
18 __field_ext( const char *, file, FILTER_PTR_STRING )
19 __field( int, line )
20 ),
21
22 TP_fast_assign(
23 /* We want to record the lock_depth after the lock is acquired */
24 __entry->lock_depth = current->lock_depth + 1;
25 __entry->func = func;
26 __entry->file = file;
27 __entry->line = line;
28 ),
29
30 TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
31 __entry->file, __entry->line, __entry->func)
32);
33
34TRACE_EVENT(unlock_kernel,
35
36 TP_PROTO(const char *func, const char *file, int line),
37
38 TP_ARGS(func, file, line),
39
40 TP_STRUCT__entry(
41 __field(int, lock_depth)
42 __field(const char *, func)
43 __field(const char *, file)
44 __field(int, line)
45 ),
46
47 TP_fast_assign(
48 __entry->lock_depth = current->lock_depth;
49 __entry->func = func;
50 __entry->file = file;
51 __entry->line = line;
52 ),
53
54 TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
55 __entry->file, __entry->line, __entry->func)
56);
57
58#endif /* _TRACE_BKL_H */
59
60/* This part must be outside protection */
61#include <trace/define_trace.h>
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 5dc283ba5ae..e972f0a40f8 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -33,7 +33,7 @@ struct syscall_metadata {
33}; 33};
34 34
35#ifdef CONFIG_FTRACE_SYSCALLS 35#ifdef CONFIG_FTRACE_SYSCALLS
36extern struct syscall_metadata *syscall_nr_to_meta(int nr); 36extern unsigned long arch_syscall_addr(int nr);
37extern int syscall_name_to_nr(char *name); 37extern int syscall_name_to_nr(char *name);
38void set_syscall_enter_id(int num, int id); 38void set_syscall_enter_id(int num, int id);
39void set_syscall_exit_id(int num, int id); 39void set_syscall_exit_id(int num, int id);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37ba67e3326..b10c0d90a6f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
60/* Quick disabling of function tracer. */ 60/* Quick disabling of function tracer. */
61int function_trace_stop; 61int function_trace_stop;
62 62
63/* List for set_ftrace_pid's pids. */
64LIST_HEAD(ftrace_pids);
65struct ftrace_pid {
66 struct list_head list;
67 struct pid *pid;
68};
69
63/* 70/*
64 * ftrace_disabled is set when an anomaly is discovered. 71 * ftrace_disabled is set when an anomaly is discovered.
65 * ftrace_disabled is much stronger than ftrace_enabled. 72 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
78ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; 85ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
79ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 86ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
80 87
88#ifdef CONFIG_FUNCTION_GRAPH_TRACER
89static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
90#endif
91
81static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) 92static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
82{ 93{
83 struct ftrace_ops *op = ftrace_list; 94 struct ftrace_ops *op = ftrace_list;
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
155 else 166 else
156 func = ftrace_list_func; 167 func = ftrace_list_func;
157 168
158 if (ftrace_pid_trace) { 169 if (!list_empty(&ftrace_pids)) {
159 set_ftrace_pid_function(func); 170 set_ftrace_pid_function(func);
160 func = ftrace_pid_func; 171 func = ftrace_pid_func;
161 } 172 }
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
203 if (ftrace_list->next == &ftrace_list_end) { 214 if (ftrace_list->next == &ftrace_list_end) {
204 ftrace_func_t func = ftrace_list->func; 215 ftrace_func_t func = ftrace_list->func;
205 216
206 if (ftrace_pid_trace) { 217 if (!list_empty(&ftrace_pids)) {
207 set_ftrace_pid_function(func); 218 set_ftrace_pid_function(func);
208 func = ftrace_pid_func; 219 func = ftrace_pid_func;
209 } 220 }
@@ -231,7 +242,7 @@ static void ftrace_update_pid_func(void)
231 func = __ftrace_trace_function; 242 func = __ftrace_trace_function;
232#endif 243#endif
233 244
234 if (ftrace_pid_trace) { 245 if (!list_empty(&ftrace_pids)) {
235 set_ftrace_pid_function(func); 246 set_ftrace_pid_function(func);
236 func = ftrace_pid_func; 247 func = ftrace_pid_func;
237 } else { 248 } else {
@@ -821,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
821} 832}
822#endif /* CONFIG_FUNCTION_PROFILER */ 833#endif /* CONFIG_FUNCTION_PROFILER */
823 834
824/* set when tracing only a pid */
825struct pid *ftrace_pid_trace;
826static struct pid * const ftrace_swapper_pid = &init_struct_pid; 835static struct pid * const ftrace_swapper_pid = &init_struct_pid;
827 836
828#ifdef CONFIG_DYNAMIC_FTRACE 837#ifdef CONFIG_DYNAMIC_FTRACE
@@ -1261,12 +1270,34 @@ static int ftrace_update_code(struct module *mod)
1261 ftrace_new_addrs = p->newlist; 1270 ftrace_new_addrs = p->newlist;
1262 p->flags = 0L; 1271 p->flags = 0L;
1263 1272
1264 /* convert record (i.e, patch mcount-call with NOP) */ 1273 /*
1265 if (ftrace_code_disable(mod, p)) { 1274 * Do the initial record convertion from mcount jump
1266 p->flags |= FTRACE_FL_CONVERTED; 1275 * to the NOP instructions.
1267 ftrace_update_cnt++; 1276 */
1268 } else 1277 if (!ftrace_code_disable(mod, p)) {
1269 ftrace_free_rec(p); 1278 ftrace_free_rec(p);
1279 continue;
1280 }
1281
1282 p->flags |= FTRACE_FL_CONVERTED;
1283 ftrace_update_cnt++;
1284
1285 /*
1286 * If the tracing is enabled, go ahead and enable the record.
1287 *
1288 * The reason not to enable the record immediately is the
1289 * inherent check of ftrace_make_nop/ftrace_make_call for
1290 * correct previous instructions. Doing the NOP conversion
1291 * first puts the module into the correct state, thus
1292 * passing the ftrace_make_call check.
1293 */
1294 if (ftrace_start_up) {
1295 int failed = __ftrace_replace_code(p, 1);
1296 if (failed) {
1297 ftrace_bug(failed, p->ip);
1298 ftrace_free_rec(p);
1299 }
1300 }
1270 } 1301 }
1271 1302
1272 stop = ftrace_now(raw_smp_processor_id()); 1303 stop = ftrace_now(raw_smp_processor_id());
@@ -1656,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1656 return ret; 1687 return ret;
1657} 1688}
1658 1689
1659enum {
1660 MATCH_FULL,
1661 MATCH_FRONT_ONLY,
1662 MATCH_MIDDLE_ONLY,
1663 MATCH_END_ONLY,
1664};
1665
1666/*
1667 * (static function - no need for kernel doc)
1668 *
1669 * Pass in a buffer containing a glob and this function will
1670 * set search to point to the search part of the buffer and
1671 * return the type of search it is (see enum above).
1672 * This does modify buff.
1673 *
1674 * Returns enum type.
1675 * search returns the pointer to use for comparison.
1676 * not returns 1 if buff started with a '!'
1677 * 0 otherwise.
1678 */
1679static int
1680ftrace_setup_glob(char *buff, int len, char **search, int *not)
1681{
1682 int type = MATCH_FULL;
1683 int i;
1684
1685 if (buff[0] == '!') {
1686 *not = 1;
1687 buff++;
1688 len--;
1689 } else
1690 *not = 0;
1691
1692 *search = buff;
1693
1694 for (i = 0; i < len; i++) {
1695 if (buff[i] == '*') {
1696 if (!i) {
1697 *search = buff + 1;
1698 type = MATCH_END_ONLY;
1699 } else {
1700 if (type == MATCH_END_ONLY)
1701 type = MATCH_MIDDLE_ONLY;
1702 else
1703 type = MATCH_FRONT_ONLY;
1704 buff[i] = 0;
1705 break;
1706 }
1707 }
1708 }
1709
1710 return type;
1711}
1712
1713static int ftrace_match(char *str, char *regex, int len, int type) 1690static int ftrace_match(char *str, char *regex, int len, int type)
1714{ 1691{
1715 int matched = 0; 1692 int matched = 0;
@@ -1758,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
1758 int not; 1735 int not;
1759 1736
1760 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1737 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1761 type = ftrace_setup_glob(buff, len, &search, &not); 1738 type = filter_parse_regex(buff, len, &search, &not);
1762 1739
1763 search_len = strlen(search); 1740 search_len = strlen(search);
1764 1741
@@ -1826,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
1826 } 1803 }
1827 1804
1828 if (strlen(buff)) { 1805 if (strlen(buff)) {
1829 type = ftrace_setup_glob(buff, strlen(buff), &search, &not); 1806 type = filter_parse_regex(buff, strlen(buff), &search, &not);
1830 search_len = strlen(search); 1807 search_len = strlen(search);
1831 } 1808 }
1832 1809
@@ -1991,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1991 int count = 0; 1968 int count = 0;
1992 char *search; 1969 char *search;
1993 1970
1994 type = ftrace_setup_glob(glob, strlen(glob), &search, &not); 1971 type = filter_parse_regex(glob, strlen(glob), &search, &not);
1995 len = strlen(search); 1972 len = strlen(search);
1996 1973
1997 /* we do not support '!' for function probes */ 1974 /* we do not support '!' for function probes */
@@ -2068,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2068 else if (glob) { 2045 else if (glob) {
2069 int not; 2046 int not;
2070 2047
2071 type = ftrace_setup_glob(glob, strlen(glob), &search, &not); 2048 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2072 len = strlen(search); 2049 len = strlen(search);
2073 2050
2074 /* we do not support '!' for function probes */ 2051 /* we do not support '!' for function probes */
@@ -2297,6 +2274,7 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2297#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 2274#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2298static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 2275static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2299static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 2276static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2277static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2300 2278
2301static int __init set_ftrace_notrace(char *str) 2279static int __init set_ftrace_notrace(char *str)
2302{ 2280{
@@ -2312,6 +2290,31 @@ static int __init set_ftrace_filter(char *str)
2312} 2290}
2313__setup("ftrace_filter=", set_ftrace_filter); 2291__setup("ftrace_filter=", set_ftrace_filter);
2314 2292
2293#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2294static int __init set_graph_function(char *str)
2295{
2296 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2297 return 1;
2298}
2299__setup("ftrace_graph_filter=", set_graph_function);
2300
2301static void __init set_ftrace_early_graph(char *buf)
2302{
2303 int ret;
2304 char *func;
2305
2306 while (buf) {
2307 func = strsep(&buf, ",");
2308 /* we allow only one expression at a time */
2309 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2310 func);
2311 if (ret)
2312 printk(KERN_DEBUG "ftrace: function %s not "
2313 "traceable\n", func);
2314 }
2315}
2316#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2317
2315static void __init set_ftrace_early_filter(char *buf, int enable) 2318static void __init set_ftrace_early_filter(char *buf, int enable)
2316{ 2319{
2317 char *func; 2320 char *func;
@@ -2328,6 +2331,10 @@ static void __init set_ftrace_early_filters(void)
2328 set_ftrace_early_filter(ftrace_filter_buf, 1); 2331 set_ftrace_early_filter(ftrace_filter_buf, 1);
2329 if (ftrace_notrace_buf[0]) 2332 if (ftrace_notrace_buf[0])
2330 set_ftrace_early_filter(ftrace_notrace_buf, 0); 2333 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2334#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2335 if (ftrace_graph_buf[0])
2336 set_ftrace_early_graph(ftrace_graph_buf);
2337#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2331} 2338}
2332 2339
2333static int 2340static int
@@ -2513,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2513 return -ENODEV; 2520 return -ENODEV;
2514 2521
2515 /* decode regex */ 2522 /* decode regex */
2516 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not); 2523 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2517 if (not) 2524 if (not)
2518 return -EINVAL; 2525 return -EINVAL;
2519 2526
@@ -2624,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2624 return 0; 2631 return 0;
2625} 2632}
2626 2633
2627static int ftrace_convert_nops(struct module *mod, 2634static int ftrace_process_locs(struct module *mod,
2628 unsigned long *start, 2635 unsigned long *start,
2629 unsigned long *end) 2636 unsigned long *end)
2630{ 2637{
@@ -2684,7 +2691,7 @@ static void ftrace_init_module(struct module *mod,
2684{ 2691{
2685 if (ftrace_disabled || start == end) 2692 if (ftrace_disabled || start == end)
2686 return; 2693 return;
2687 ftrace_convert_nops(mod, start, end); 2694 ftrace_process_locs(mod, start, end);
2688} 2695}
2689 2696
2690static int ftrace_module_notify(struct notifier_block *self, 2697static int ftrace_module_notify(struct notifier_block *self,
@@ -2745,7 +2752,7 @@ void __init ftrace_init(void)
2745 2752
2746 last_ftrace_enabled = ftrace_enabled = 1; 2753 last_ftrace_enabled = ftrace_enabled = 1;
2747 2754
2748 ret = ftrace_convert_nops(NULL, 2755 ret = ftrace_process_locs(NULL,
2749 __start_mcount_loc, 2756 __start_mcount_loc,
2750 __stop_mcount_loc); 2757 __stop_mcount_loc);
2751 2758
@@ -2778,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { }
2778# define ftrace_shutdown_sysctl() do { } while (0) 2785# define ftrace_shutdown_sysctl() do { } while (0)
2779#endif /* CONFIG_DYNAMIC_FTRACE */ 2786#endif /* CONFIG_DYNAMIC_FTRACE */
2780 2787
2781static ssize_t
2782ftrace_pid_read(struct file *file, char __user *ubuf,
2783 size_t cnt, loff_t *ppos)
2784{
2785 char buf[64];
2786 int r;
2787
2788 if (ftrace_pid_trace == ftrace_swapper_pid)
2789 r = sprintf(buf, "swapper tasks\n");
2790 else if (ftrace_pid_trace)
2791 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2792 else
2793 r = sprintf(buf, "no pid\n");
2794
2795 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2796}
2797
2798static void clear_ftrace_swapper(void) 2788static void clear_ftrace_swapper(void)
2799{ 2789{
2800 struct task_struct *p; 2790 struct task_struct *p;
@@ -2845,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid)
2845 rcu_read_unlock(); 2835 rcu_read_unlock();
2846} 2836}
2847 2837
2848static void clear_ftrace_pid_task(struct pid **pid) 2838static void clear_ftrace_pid_task(struct pid *pid)
2849{ 2839{
2850 if (*pid == ftrace_swapper_pid) 2840 if (pid == ftrace_swapper_pid)
2851 clear_ftrace_swapper(); 2841 clear_ftrace_swapper();
2852 else 2842 else
2853 clear_ftrace_pid(*pid); 2843 clear_ftrace_pid(pid);
2854
2855 *pid = NULL;
2856} 2844}
2857 2845
2858static void set_ftrace_pid_task(struct pid *pid) 2846static void set_ftrace_pid_task(struct pid *pid)
@@ -2863,11 +2851,140 @@ static void set_ftrace_pid_task(struct pid *pid)
2863 set_ftrace_pid(pid); 2851 set_ftrace_pid(pid);
2864} 2852}
2865 2853
2854static int ftrace_pid_add(int p)
2855{
2856 struct pid *pid;
2857 struct ftrace_pid *fpid;
2858 int ret = -EINVAL;
2859
2860 mutex_lock(&ftrace_lock);
2861
2862 if (!p)
2863 pid = ftrace_swapper_pid;
2864 else
2865 pid = find_get_pid(p);
2866
2867 if (!pid)
2868 goto out;
2869
2870 ret = 0;
2871
2872 list_for_each_entry(fpid, &ftrace_pids, list)
2873 if (fpid->pid == pid)
2874 goto out_put;
2875
2876 ret = -ENOMEM;
2877
2878 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2879 if (!fpid)
2880 goto out_put;
2881
2882 list_add(&fpid->list, &ftrace_pids);
2883 fpid->pid = pid;
2884
2885 set_ftrace_pid_task(pid);
2886
2887 ftrace_update_pid_func();
2888 ftrace_startup_enable(0);
2889
2890 mutex_unlock(&ftrace_lock);
2891 return 0;
2892
2893out_put:
2894 if (pid != ftrace_swapper_pid)
2895 put_pid(pid);
2896
2897out:
2898 mutex_unlock(&ftrace_lock);
2899 return ret;
2900}
2901
2902static void ftrace_pid_reset(void)
2903{
2904 struct ftrace_pid *fpid, *safe;
2905
2906 mutex_lock(&ftrace_lock);
2907 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2908 struct pid *pid = fpid->pid;
2909
2910 clear_ftrace_pid_task(pid);
2911
2912 list_del(&fpid->list);
2913 kfree(fpid);
2914 }
2915
2916 ftrace_update_pid_func();
2917 ftrace_startup_enable(0);
2918
2919 mutex_unlock(&ftrace_lock);
2920}
2921
2922static void *fpid_start(struct seq_file *m, loff_t *pos)
2923{
2924 mutex_lock(&ftrace_lock);
2925
2926 if (list_empty(&ftrace_pids) && (!*pos))
2927 return (void *) 1;
2928
2929 return seq_list_start(&ftrace_pids, *pos);
2930}
2931
2932static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2933{
2934 if (v == (void *)1)
2935 return NULL;
2936
2937 return seq_list_next(v, &ftrace_pids, pos);
2938}
2939
2940static void fpid_stop(struct seq_file *m, void *p)
2941{
2942 mutex_unlock(&ftrace_lock);
2943}
2944
2945static int fpid_show(struct seq_file *m, void *v)
2946{
2947 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2948
2949 if (v == (void *)1) {
2950 seq_printf(m, "no pid\n");
2951 return 0;
2952 }
2953
2954 if (fpid->pid == ftrace_swapper_pid)
2955 seq_printf(m, "swapper tasks\n");
2956 else
2957 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2958
2959 return 0;
2960}
2961
2962static const struct seq_operations ftrace_pid_sops = {
2963 .start = fpid_start,
2964 .next = fpid_next,
2965 .stop = fpid_stop,
2966 .show = fpid_show,
2967};
2968
2969static int
2970ftrace_pid_open(struct inode *inode, struct file *file)
2971{
2972 int ret = 0;
2973
2974 if ((file->f_mode & FMODE_WRITE) &&
2975 (file->f_flags & O_TRUNC))
2976 ftrace_pid_reset();
2977
2978 if (file->f_mode & FMODE_READ)
2979 ret = seq_open(file, &ftrace_pid_sops);
2980
2981 return ret;
2982}
2983
2866static ssize_t 2984static ssize_t
2867ftrace_pid_write(struct file *filp, const char __user *ubuf, 2985ftrace_pid_write(struct file *filp, const char __user *ubuf,
2868 size_t cnt, loff_t *ppos) 2986 size_t cnt, loff_t *ppos)
2869{ 2987{
2870 struct pid *pid;
2871 char buf[64]; 2988 char buf[64];
2872 long val; 2989 long val;
2873 int ret; 2990 int ret;
@@ -2880,57 +2997,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
2880 2997
2881 buf[cnt] = 0; 2998 buf[cnt] = 0;
2882 2999
3000 /*
3001 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3002 * to clear the filter quietly.
3003 */
3004 strstrip(buf);
3005 if (strlen(buf) == 0)
3006 return 1;
3007
2883 ret = strict_strtol(buf, 10, &val); 3008 ret = strict_strtol(buf, 10, &val);
2884 if (ret < 0) 3009 if (ret < 0)
2885 return ret; 3010 return ret;
2886 3011
2887 mutex_lock(&ftrace_lock); 3012 ret = ftrace_pid_add(val);
2888 if (val < 0) {
2889 /* disable pid tracing */
2890 if (!ftrace_pid_trace)
2891 goto out;
2892
2893 clear_ftrace_pid_task(&ftrace_pid_trace);
2894
2895 } else {
2896 /* swapper task is special */
2897 if (!val) {
2898 pid = ftrace_swapper_pid;
2899 if (pid == ftrace_pid_trace)
2900 goto out;
2901 } else {
2902 pid = find_get_pid(val);
2903 3013
2904 if (pid == ftrace_pid_trace) { 3014 return ret ? ret : cnt;
2905 put_pid(pid); 3015}
2906 goto out;
2907 }
2908 }
2909
2910 if (ftrace_pid_trace)
2911 clear_ftrace_pid_task(&ftrace_pid_trace);
2912
2913 if (!pid)
2914 goto out;
2915
2916 ftrace_pid_trace = pid;
2917
2918 set_ftrace_pid_task(ftrace_pid_trace);
2919 }
2920
2921 /* update the function call */
2922 ftrace_update_pid_func();
2923 ftrace_startup_enable(0);
2924 3016
2925 out: 3017static int
2926 mutex_unlock(&ftrace_lock); 3018ftrace_pid_release(struct inode *inode, struct file *file)
3019{
3020 if (file->f_mode & FMODE_READ)
3021 seq_release(inode, file);
2927 3022
2928 return cnt; 3023 return 0;
2929} 3024}
2930 3025
2931static const struct file_operations ftrace_pid_fops = { 3026static const struct file_operations ftrace_pid_fops = {
2932 .read = ftrace_pid_read, 3027 .open = ftrace_pid_open,
2933 .write = ftrace_pid_write, 3028 .write = ftrace_pid_write,
3029 .read = seq_read,
3030 .llseek = seq_lseek,
3031 .release = ftrace_pid_release,
2934}; 3032};
2935 3033
2936static __init int ftrace_init_debugfs(void) 3034static __init int ftrace_init_debugfs(void)
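With set_ftrace_pid now backed by a list of pids instead of a single one, writes append to the list and an open with O_TRUNC resets it. Assuming debugfs is mounted at /sys/kernel/debug (pids invented for illustration), usage looks roughly like:

	echo 1234  > /sys/kernel/debug/tracing/set_ftrace_pid	# reset, then trace pid 1234
	echo 5678 >> /sys/kernel/debug/tracing/set_ftrace_pid	# additionally trace pid 5678
	cat /sys/kernel/debug/tracing/set_ftrace_pid		# lists both pids
	echo 0     > /sys/kernel/debug/tracing/set_ftrace_pid	# special case: swapper tasks
	echo       > /sys/kernel/debug/tracing/set_ftrace_pid	# clears the list ("no pid")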
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c820b0310a1..026e715a0c7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; 129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130static char *default_bootup_tracer; 130static char *default_bootup_tracer;
131 131
132static int __init set_ftrace(char *str) 132static int __init set_cmdline_ftrace(char *str)
133{ 133{
134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
135 default_bootup_tracer = bootup_tracer_buf; 135 default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
137 ring_buffer_expanded = 1; 137 ring_buffer_expanded = 1;
138 return 1; 138 return 1;
139} 139}
140__setup("ftrace=", set_ftrace); 140__setup("ftrace=", set_cmdline_ftrace);
141 141
142static int __init set_ftrace_dump_on_oops(char *str) 142static int __init set_ftrace_dump_on_oops(char *str)
143{ 143{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 405cb850b75..acef8b4636f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -483,10 +483,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
483 return 0; 483 return 0;
484} 484}
485#else 485#else
486static inline int ftrace_trace_addr(unsigned long addr)
487{
488 return 1;
489}
490static inline int ftrace_graph_addr(unsigned long addr) 486static inline int ftrace_graph_addr(unsigned long addr)
491{ 487{
492 return 1; 488 return 1;
@@ -500,12 +496,12 @@ print_graph_function(struct trace_iterator *iter)
500} 496}
501#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 497#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
502 498
503extern struct pid *ftrace_pid_trace; 499extern struct list_head ftrace_pids;
504 500
505#ifdef CONFIG_FUNCTION_TRACER 501#ifdef CONFIG_FUNCTION_TRACER
506static inline int ftrace_trace_task(struct task_struct *task) 502static inline int ftrace_trace_task(struct task_struct *task)
507{ 503{
508 if (!ftrace_pid_trace) 504 if (list_empty(&ftrace_pids))
509 return 1; 505 return 1;
510 506
511 return test_tsk_trace_trace(task); 507 return test_tsk_trace_trace(task);
@@ -699,22 +695,40 @@ struct event_subsystem {
699}; 695};
700 696
701struct filter_pred; 697struct filter_pred;
698struct regex;
702 699
703typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, 700typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
704 int val1, int val2); 701 int val1, int val2);
705 702
703typedef int (*regex_match_func)(char *str, struct regex *r, int len);
704
705enum regex_type {
706 MATCH_FULL,
707 MATCH_FRONT_ONLY,
708 MATCH_MIDDLE_ONLY,
709 MATCH_END_ONLY,
710};
711
712struct regex {
713 char pattern[MAX_FILTER_STR_VAL];
714 int len;
715 int field_len;
716 regex_match_func match;
717};
718
706struct filter_pred { 719struct filter_pred {
707 filter_pred_fn_t fn; 720 filter_pred_fn_t fn;
708 u64 val; 721 u64 val;
709 char str_val[MAX_FILTER_STR_VAL]; 722 struct regex regex;
710 int str_len; 723 char *field_name;
711 char *field_name; 724 int offset;
712 int offset; 725 int not;
713 int not; 726 int op;
714 int op; 727 int pop_n;
715 int pop_n;
716}; 728};
717 729
730extern enum regex_type
731filter_parse_regex(char *buff, int len, char **search, int *not);
718extern void print_event_filter(struct ftrace_event_call *call, 732extern void print_event_filter(struct ftrace_event_call *call,
719 struct trace_seq *s); 733 struct trace_seq *s);
720extern int apply_event_filter(struct ftrace_event_call *call, 734extern int apply_event_filter(struct ftrace_event_call *call,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index cf3cabf6ce1..7c18d154ea2 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -878,9 +878,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
878 "'%s/filter' entry\n", name); 878 "'%s/filter' entry\n", name);
879 } 879 }
880 880
881 entry = trace_create_file("enable", 0644, system->entry, 881 trace_create_file("enable", 0644, system->entry,
882 (void *)system->name, 882 (void *)system->name,
883 &ftrace_system_enable_fops); 883 &ftrace_system_enable_fops);
884 884
885 return system->entry; 885 return system->entry;
886} 886}
@@ -892,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
892 const struct file_operations *filter, 892 const struct file_operations *filter,
893 const struct file_operations *format) 893 const struct file_operations *format)
894{ 894{
895 struct dentry *entry;
896 int ret; 895 int ret;
897 896
898 /* 897 /*
@@ -910,12 +909,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
910 } 909 }
911 910
912 if (call->regfunc) 911 if (call->regfunc)
913 entry = trace_create_file("enable", 0644, call->dir, call, 912 trace_create_file("enable", 0644, call->dir, call,
914 enable); 913 enable);
915 914
916 if (call->id && call->profile_enable) 915 if (call->id && call->profile_enable)
917 entry = trace_create_file("id", 0444, call->dir, call, 916 trace_create_file("id", 0444, call->dir, call,
918 id); 917 id);
919 918
920 if (call->define_fields) { 919 if (call->define_fields) {
921 ret = call->define_fields(call); 920 ret = call->define_fields(call);
@@ -924,16 +923,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
924 " events/%s\n", call->name); 923 " events/%s\n", call->name);
925 return ret; 924 return ret;
926 } 925 }
927 entry = trace_create_file("filter", 0644, call->dir, call, 926 trace_create_file("filter", 0644, call->dir, call,
928 filter); 927 filter);
929 } 928 }
930 929
931 /* A trace may not want to export its format */ 930 /* A trace may not want to export its format */
932 if (!call->show_format) 931 if (!call->show_format)
933 return 0; 932 return 0;
934 933
935 entry = trace_create_file("format", 0444, call->dir, call, 934 trace_create_file("format", 0444, call->dir, call,
936 format); 935 format);
937 936
938 return 0; 937 return 0;
939} 938}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 98a6cc5c64e..92672016da2 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -18,8 +18,6 @@
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> 18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */ 19 */
20 20
21#include <linux/debugfs.h>
22#include <linux/uaccess.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/ctype.h> 22#include <linux/ctype.h>
25#include <linux/mutex.h> 23#include <linux/mutex.h>
@@ -197,9 +195,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
197 char *addr = (char *)(event + pred->offset); 195 char *addr = (char *)(event + pred->offset);
198 int cmp, match; 196 int cmp, match;
199 197
200 cmp = strncmp(addr, pred->str_val, pred->str_len); 198 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
201 199
202 match = (!cmp) ^ pred->not; 200 match = cmp ^ pred->not;
203 201
204 return match; 202 return match;
205} 203}
@@ -211,9 +209,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
211 char **addr = (char **)(event + pred->offset); 209 char **addr = (char **)(event + pred->offset);
212 int cmp, match; 210 int cmp, match;
213 211
214 cmp = strncmp(*addr, pred->str_val, pred->str_len); 212 cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
215 213
216 match = (!cmp) ^ pred->not; 214 match = cmp ^ pred->not;
217 215
218 return match; 216 return match;
219} 217}
@@ -237,9 +235,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
237 char *addr = (char *)(event + str_loc); 235 char *addr = (char *)(event + str_loc);
238 int cmp, match; 236 int cmp, match;
239 237
240 cmp = strncmp(addr, pred->str_val, str_len); 238 cmp = pred->regex.match(addr, &pred->regex, str_len);
241 239
242 match = (!cmp) ^ pred->not; 240 match = cmp ^ pred->not;
243 241
244 return match; 242 return match;
245} 243}
@@ -250,6 +248,124 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
250 return 0; 248 return 0;
251} 249}
252 250
251/* Basic regex callbacks */
252static int regex_match_full(char *str, struct regex *r, int len)
253{
254 if (strncmp(str, r->pattern, len) == 0)
255 return 1;
256 return 0;
257}
258
259static int regex_match_front(char *str, struct regex *r, int len)
260{
261 if (strncmp(str, r->pattern, len) == 0)
262 return 1;
263 return 0;
264}
265
266static int regex_match_middle(char *str, struct regex *r, int len)
267{
268 if (strstr(str, r->pattern))
269 return 1;
270 return 0;
271}
272
273static int regex_match_end(char *str, struct regex *r, int len)
274{
275 char *ptr = strstr(str, r->pattern);
276
277 if (ptr && (ptr[r->len] == 0))
278 return 1;
279 return 0;
280}
281
282/**
283 * filter_parse_regex - parse a basic regex
284 * @buff: the raw regex
285 * @len: length of the regex
286 * @search: will point to the beginning of the string to compare
287 * @not: tell whether the match will have to be inverted
288 *
289 * This passes in a buffer containing a regex and this function will
290 * set search to point to the search part of the buffer and
291 * return the type of search it is (see enum above).
292 * This does modify buff.
293 *
294 * Returns enum type.
295 * search returns the pointer to use for comparison.
296 * not returns 1 if buff started with a '!'
297 * 0 otherwise.
298 */
299enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
300{
301 int type = MATCH_FULL;
302 int i;
303
304 if (buff[0] == '!') {
305 *not = 1;
306 buff++;
307 len--;
308 } else
309 *not = 0;
310
311 *search = buff;
312
313 for (i = 0; i < len; i++) {
314 if (buff[i] == '*') {
315 if (!i) {
316 *search = buff + 1;
317 type = MATCH_END_ONLY;
318 } else {
319 if (type == MATCH_END_ONLY)
320 type = MATCH_MIDDLE_ONLY;
321 else
322 type = MATCH_FRONT_ONLY;
323 buff[i] = 0;
324 break;
325 }
326 }
327 }
328
329 return type;
330}
331
332static int filter_build_regex(struct filter_pred *pred)
333{
334 struct regex *r = &pred->regex;
335 char *search, *dup;
336 enum regex_type type;
337 int not;
338
339 type = filter_parse_regex(r->pattern, r->len, &search, &not);
340 dup = kstrdup(search, GFP_KERNEL);
341 if (!dup)
342 return -ENOMEM;
343
344 strcpy(r->pattern, dup);
345 kfree(dup);
346
347 r->len = strlen(r->pattern);
348
349 switch (type) {
350 case MATCH_FULL:
351 r->match = regex_match_full;
352 break;
353 case MATCH_FRONT_ONLY:
354 r->match = regex_match_front;
355 break;
356 case MATCH_MIDDLE_ONLY:
357 r->match = regex_match_middle;
358 break;
359 case MATCH_END_ONLY:
360 r->match = regex_match_end;
361 break;
362 }
363
364 pred->not ^= not;
365
366 return 0;
367}
368
253/* return 1 if event matches, 0 otherwise (discard) */ 369/* return 1 if event matches, 0 otherwise (discard) */
254int filter_match_preds(struct ftrace_event_call *call, void *rec) 370int filter_match_preds(struct ftrace_event_call *call, void *rec)
255{ 371{
@@ -396,7 +512,7 @@ static void filter_clear_pred(struct filter_pred *pred)
396{ 512{
397 kfree(pred->field_name); 513 kfree(pred->field_name);
398 pred->field_name = NULL; 514 pred->field_name = NULL;
399 pred->str_len = 0; 515 pred->regex.len = 0;
400} 516}
401 517
402static int filter_set_pred(struct filter_pred *dest, 518static int filter_set_pred(struct filter_pred *dest,
@@ -660,21 +776,24 @@ static int filter_add_pred(struct filter_parse_state *ps,
660 } 776 }
661 777
662 if (is_string_field(field)) { 778 if (is_string_field(field)) {
663 pred->str_len = field->size; 779 ret = filter_build_regex(pred);
780 if (ret)
781 return ret;
664 782
665 if (field->filter_type == FILTER_STATIC_STRING) 783 if (field->filter_type == FILTER_STATIC_STRING) {
666 fn = filter_pred_string; 784 fn = filter_pred_string;
667 else if (field->filter_type == FILTER_DYN_STRING) 785 pred->regex.field_len = field->size;
668 fn = filter_pred_strloc; 786 } else if (field->filter_type == FILTER_DYN_STRING)
787 fn = filter_pred_strloc;
669 else { 788 else {
670 fn = filter_pred_pchar; 789 fn = filter_pred_pchar;
671 pred->str_len = strlen(pred->str_val); 790 pred->regex.field_len = strlen(pred->regex.pattern);
672 } 791 }
673 } else { 792 } else {
674 if (field->is_signed) 793 if (field->is_signed)
675 ret = strict_strtoll(pred->str_val, 0, &val); 794 ret = strict_strtoll(pred->regex.pattern, 0, &val);
676 else 795 else
677 ret = strict_strtoull(pred->str_val, 0, &val); 796 ret = strict_strtoull(pred->regex.pattern, 0, &val);
678 if (ret) { 797 if (ret) {
679 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); 798 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
680 return -EINVAL; 799 return -EINVAL;
@@ -1045,8 +1164,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
1045 return NULL; 1164 return NULL;
1046 } 1165 }
1047 1166
1048 strcpy(pred->str_val, operand2); 1167 strcpy(pred->regex.pattern, operand2);
1049 pred->str_len = strlen(operand2); 1168 pred->regex.len = strlen(pred->regex.pattern);
1050 1169
1051 pred->op = op; 1170 pred->op = op;
1052 1171
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index d99abc427c3..d00d1a8f1f2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -14,6 +14,69 @@ static int sys_refcount_exit;
14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); 14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); 15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
16 16
17extern unsigned long __start_syscalls_metadata[];
18extern unsigned long __stop_syscalls_metadata[];
19
20static struct syscall_metadata **syscalls_metadata;
21
22static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
23{
24 struct syscall_metadata *start;
25 struct syscall_metadata *stop;
26 char str[KSYM_SYMBOL_LEN];
27
28
29 start = (struct syscall_metadata *)__start_syscalls_metadata;
30 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
31 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
32
33 for ( ; start < stop; start++) {
34 /*
35 * Only compare after the "sys" prefix. Archs that use
36 * syscall wrappers may have syscall symbol aliases prefixed
37 * with "SyS" instead of "sys", leading to an unwanted
38 * mismatch.
39 */
40 if (start->name && !strcmp(start->name + 3, str + 3))
41 return start;
42 }
43 return NULL;
44}
45
46static struct syscall_metadata *syscall_nr_to_meta(int nr)
47{
48 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
49 return NULL;
50
51 return syscalls_metadata[nr];
52}
53
54int syscall_name_to_nr(char *name)
55{
56 int i;
57
58 if (!syscalls_metadata)
59 return -1;
60
61 for (i = 0; i < NR_syscalls; i++) {
62 if (syscalls_metadata[i]) {
63 if (!strcmp(syscalls_metadata[i]->name, name))
64 return i;
65 }
66 }
67 return -1;
68}
69
70void set_syscall_enter_id(int num, int id)
71{
72 syscalls_metadata[num]->enter_id = id;
73}
74
75void set_syscall_exit_id(int num, int id)
76{
77 syscalls_metadata[num]->exit_id = id;
78}
79
17enum print_line_t 80enum print_line_t
18print_syscall_enter(struct trace_iterator *iter, int flags) 81print_syscall_enter(struct trace_iterator *iter, int flags)
19{ 82{
@@ -381,6 +444,29 @@ struct trace_event event_syscall_exit = {
381 .trace = print_syscall_exit, 444 .trace = print_syscall_exit,
382}; 445};
383 446
447int __init init_ftrace_syscalls(void)
448{
449 struct syscall_metadata *meta;
450 unsigned long addr;
451 int i;
452
453 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
454 NR_syscalls, GFP_KERNEL);
455 if (!syscalls_metadata) {
456 WARN_ON(1);
457 return -ENOMEM;
458 }
459
460 for (i = 0; i < NR_syscalls; i++) {
461 addr = arch_syscall_addr(i);
462 meta = find_syscall_meta(addr);
463 syscalls_metadata[i] = meta;
464 }
465
466 return 0;
467}
468core_initcall(init_ftrace_syscalls);
469
384#ifdef CONFIG_EVENT_PROFILE 470#ifdef CONFIG_EVENT_PROFILE
385 471
386static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); 472static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
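The "+ 3" in find_syscall_meta() skips the syscall prefix so that archs using syscall wrappers still match; for sys_read on such an arch the comparison works out roughly as:

	kallsyms name for the table entry:	"SyS_read"
	metadata ->name:			"sys_read"
	compared strings (offset 3):		"_read" == "_read"	-> match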
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e352..4ebfa5a164d 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
5 * relegated to obsolescence, but used by various less 5 * relegated to obsolescence, but used by various less
6 * important (or lazy) subsystems. 6 * important (or lazy) subsystems.
7 */ 7 */
8#include <linux/smp_lock.h>
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
11#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#include <linux/smp_lock.h>
12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
12 15
13/* 16/*
14 * The 'big kernel lock' 17 * The 'big kernel lock'
@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
113 * This cannot happen asynchronously, so we only need to 116 * This cannot happen asynchronously, so we only need to
114 * worry about other CPU's. 117 * worry about other CPU's.
115 */ 118 */
116void __lockfunc lock_kernel(void) 119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
117{ 120{
118 int depth = current->lock_depth+1; 121 int depth = current->lock_depth + 1;
122
123 trace_lock_kernel(func, file, line);
124
119 if (likely(!depth)) 125 if (likely(!depth))
120 __lock_kernel(); 126 __lock_kernel();
121 current->lock_depth = depth; 127 current->lock_depth = depth;
122} 128}
123 129
124void __lockfunc unlock_kernel(void) 130void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
125{ 131{
126 BUG_ON(current->lock_depth < 0); 132 BUG_ON(current->lock_depth < 0);
127 if (likely(--current->lock_depth < 0)) 133 if (likely(--current->lock_depth < 0))
128 __unlock_kernel(); 134 __unlock_kernel();
135
136 trace_unlock_kernel(func, file, line);
129} 137}
130 138
131EXPORT_SYMBOL(lock_kernel); 139EXPORT_SYMBOL(_lock_kernel);
132EXPORT_SYMBOL(unlock_kernel); 140EXPORT_SYMBOL(_unlock_kernel);
133 141
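The two tracepoints defined in include/trace/events/bkl.h show up as ordinary trace events. Assuming debugfs is mounted at /sys/kernel/debug, they can be exercised with:

	echo 1 > /sys/kernel/debug/tracing/events/bkl/enable
	cat /sys/kernel/debug/tracing/trace

Each entry is printed in the "depth: %d, %s:%d %s()" form given by the TP_printk() formats above.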
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 090d300d739..bfb8b2cdd92 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -119,6 +119,7 @@ my %text_sections = (
119 ".sched.text" => 1, 119 ".sched.text" => 1,
120 ".spinlock.text" => 1, 120 ".spinlock.text" => 1,
121 ".irqentry.text" => 1, 121 ".irqentry.text" => 1,
122 ".text.unlikely" => 1,
122); 123);
123 124
124$objdump = "objdump" if ((length $objdump) == 0); 125$objdump = "objdump" if ((length $objdump) == 0);