path: root/kernel/trace
author    Linus Torvalds <torvalds@linux-foundation.org>  2010-02-28 13:20:25 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-02-28 13:20:25 -0500
commit    6556a6743549defc32e5f90ee2cb1ecd833a44c3
tree      622306583d4a3c13235a8bfc012854c125c597f1  /kernel/trace
parent    e0d272429a34ff143bfa04ee8e29dd4eed2964c7
parent    1dd2980d990068e20045b90c424518cc7f3657ff
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
  perf_event, amd: Fix spinlock initialization
  perf_event: Fix preempt warning in perf_clock()
  perf tools: Flush maps on COMM events
  perf_events, x86: Split PMU definitions into separate files
  perf annotate: Handle samples not at objdump output addr boundaries
  perf_events, x86: Remove superflous MSR writes
  perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
  perf_events, x86: AMD event scheduling
  perf_events: Add new start/stop PMU callbacks
  perf_events: Report the MMAP pgoff value in bytes
  perf annotate: Defer allocating sym_priv->hist array
  perf symbols: Improve debugging information about symtab origins
  perf top: Use a macro instead of a constant variable
  perf symbols: Check the right return variable
  perf/scripts: Tag syscall_name helper as not yet available
  perf/scripts: Add perf-trace-python Documentation
  perf/scripts: Remove unnecessary PyTuple resizes
  perf/scripts: Add syscall tracing scripts
  perf/scripts: Add Python scripting engine
  perf/scripts: Remove check-perf-trace from listed scripts
  ...

Fix trivial conflict in tools/perf/util/probe-event.c
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Makefile              |   4
-rw-r--r--  kernel/trace/ftrace.c              |  54
-rw-r--r--  kernel/trace/trace_event_profile.c |  52
-rw-r--r--  kernel/trace/trace_events_filter.c |   4
-rw-r--r--  kernel/trace/trace_kprobe.c        | 196
-rw-r--r--  kernel/trace/trace_syscalls.c      |  76
6 files changed, 152 insertions(+), 234 deletions(-)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cd9ecd89ec77..d00c6fe23f54 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -51,7 +51,9 @@ endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
-obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+ifeq ($(CONFIG_PERF_EVENTS),y)
+obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d996353473fd..83783579378f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -22,7 +22,6 @@
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
 #include <linux/uaccess.h>
-#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
@@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records;
 	}				\
 	}
 
-#ifdef CONFIG_KPROBES
-
-static int frozen_record_count;
-
-static inline void freeze_record(struct dyn_ftrace *rec)
-{
-	if (!(rec->flags & FTRACE_FL_FROZEN)) {
-		rec->flags |= FTRACE_FL_FROZEN;
-		frozen_record_count++;
-	}
-}
-
-static inline void unfreeze_record(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_FROZEN) {
-		rec->flags &= ~FTRACE_FL_FROZEN;
-		frozen_record_count--;
-	}
-}
-
-static inline int record_frozen(struct dyn_ftrace *rec)
-{
-	return rec->flags & FTRACE_FL_FROZEN;
-}
-#else
-# define freeze_record(rec)	({ 0; })
-# define unfreeze_record(rec)	({ 0; })
-# define record_frozen(rec)	({ 0; })
-#endif /* CONFIG_KPROBES */
-
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	rec->freelist = ftrace_free_records;
@@ -1025,6 +994,21 @@ static void ftrace_bug(int failed, unsigned long ip)
 }
 
 
+/* Return 1 if the address range is reserved for ftrace */
+int ftrace_text_reserved(void *start, void *end)
+{
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		if (rec->ip <= (unsigned long)end &&
+		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
+			return 1;
+	} while_for_each_ftrace_rec();
+	return 0;
+}
+
+
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
@@ -1076,14 +1060,6 @@ static void ftrace_replace_code(int enable)
 		    !(rec->flags & FTRACE_FL_CONVERTED))
 			continue;
 
-		/* ignore updates to this record's mcount site */
-		if (get_kprobe((void *)rec->ip)) {
-			freeze_record(rec);
-			continue;
-		} else {
-			unfreeze_record(rec);
-		}
-
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			rec->flags |= FTRACE_FL_FAILED;
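
The ftrace.c side of this merge drops the kprobes freeze_record() machinery and instead exports ftrace_text_reserved(), which reports whether an address range overlaps an mcount call site. A minimal sketch of how a caller might use the new helper; check_probe_address() and the -EBUSY choice are illustrative, not taken from this patch:

	/*
	 * Hypothetical caller, not part of this patch: refuse to place a
	 * probe on an address that ftrace has reserved for an mcount site.
	 */
	static int check_probe_address(unsigned long addr)
	{
		if (ftrace_text_reserved((void *)addr, (void *)addr))
			return -EBUSY;	/* illustrative error code */
		return 0;
	}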
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 9e25573242cf..f0d693005075 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -6,14 +6,12 @@
  */
 
 #include <linux/module.h>
+#include <linux/kprobes.h>
 #include "trace.h"
 
 
-char *perf_trace_buf;
-EXPORT_SYMBOL_GPL(perf_trace_buf);
-
-char *perf_trace_buf_nmi;
-EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
+static char *perf_trace_buf;
+static char *perf_trace_buf_nmi;
 
 typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
 
@@ -120,3 +118,47 @@ void ftrace_profile_disable(int event_id)
 	}
 	mutex_unlock(&event_mutex);
 }
+
+__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
+					int *rctxp, unsigned long *irq_flags)
+{
+	struct trace_entry *entry;
+	char *trace_buf, *raw_data;
+	int pc, cpu;
+
+	pc = preempt_count();
+
+	/* Protect the per cpu buffer, begin the rcu read side */
+	local_irq_save(*irq_flags);
+
+	*rctxp = perf_swevent_get_recursion_context();
+	if (*rctxp < 0)
+		goto err_recursion;
+
+	cpu = smp_processor_id();
+
+	if (in_nmi())
+		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+	else
+		trace_buf = rcu_dereference(perf_trace_buf);
+
+	if (!trace_buf)
+		goto err;
+
+	raw_data = per_cpu_ptr(trace_buf, cpu);
+
+	/* zero the dead bytes from align to not leak stack to user */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+	entry = (struct trace_entry *)raw_data;
+	tracing_generic_entry_update(entry, *irq_flags, pc);
+	entry->type = type;
+
+	return raw_data;
+err:
+	perf_swevent_put_recursion_context(*rctxp);
+err_recursion:
+	local_irq_restore(*irq_flags);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
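
The new ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() pair factors out the recursion-context, per-cpu-buffer, and irq handling that each profile handler previously open-coded. A condensed sketch of the calling pattern, mirroring the handlers rewritten later in this merge; my_entry, my_event_id and my_profile_handler are made-up names:

	/* Illustrative caller; the struct and names are invented. */
	struct my_entry {
		struct trace_entry	ent;
		unsigned long		value;
	};

	static void my_profile_handler(int my_event_id, unsigned long value)
	{
		struct my_entry *entry;
		unsigned long irq_flags;
		int rctx;
		int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64))
			   - sizeof(u32);

		/* real callers also check size against FTRACE_MAX_PROFILE_SIZE */
		entry = ftrace_perf_buf_prepare(size, my_event_id, &rctx,
						&irq_flags);
		if (!entry)
			return;	/* recursion detected or buffer missing */

		entry->value = value;
		/* submit pairs with prepare: it pushes the record to perf and
		 * releases the recursion context and irq flags */
		ftrace_perf_buf_submit(entry, size, rctx, 0, 1, irq_flags);
	}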
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e42af9aad69f..4615f62a04f1 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1371,7 +1371,7 @@ out_unlock:
 	return err;
 }
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 void ftrace_profile_free_filter(struct perf_event *event)
 {
@@ -1439,5 +1439,5 @@ out_unlock:
 	return err;
 }
 
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 465b36bef4ca..505c92273b1a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -91,11 +91,6 @@ static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
 	return retval;
 }
 
-static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
-{
-	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
-}
-
 static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
 					      void *dummy)
 {
@@ -231,9 +226,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
 {
 	int ret = -EINVAL;
 
-	if (ff->func == fetch_argument)
-		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
-	else if (ff->func == fetch_register) {
+	if (ff->func == fetch_register) {
 		const char *name;
 		name = regs_query_register_name((unsigned int)((long)ff->data));
 		ret = snprintf(buf, n, "%%%s", name);
@@ -489,14 +482,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
 		}
 		} else
 			ret = -EINVAL;
-	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
-		ret = strict_strtoul(arg + 3, 10, &param);
-		if (ret || param > PARAM_MAX_ARGS)
-			ret = -EINVAL;
-		else {
-			ff->func = fetch_argument;
-			ff->data = (void *)param;
-		}
 	} else
 		ret = -EINVAL;
 	return ret;
@@ -611,7 +596,6 @@ static int create_trace_probe(int argc, char **argv)
  * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
  * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
  * Fetch args:
- *  $argN	: fetch Nth of function argument. (N:0-)
  *  $retval	: fetch return value
  *  $stack	: fetch stack address
  *  $stackN	: fetch Nth of stack (N:0-)
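
With $argN removed, probe arguments have to be fetched through the remaining methods (registers, stack slots, memory dereference, symbols). An illustrative definition fed through the file-local command_trace_probe() parser; myprobe and the do_sys_open target are placeholders:

	/* Illustrative: stack-based fetch args replacing the removed $argN. */
	static int __init add_example_probe(void)
	{
		return command_trace_probe(
				"p:myprobe do_sys_open $stack0 +0($stack)");
	}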
@@ -958,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = {
 };
 
 /* Kprobe handler */
-static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct kprobe_trace_entry *entry;
@@ -978,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
 	if (!event)
-		return 0;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	entry->nargs = tp->nr_args;
@@ -988,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-	return 0;
 }
 
 /* Kretprobe handler */
-static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1011,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
 	if (!event)
-		return 0;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	entry->nargs = tp->nr_args;
@@ -1022,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-
-	return 0;
 }
 
 /* Event entry printers */
@@ -1230,137 +1211,67 @@ static int set_print_fmt(struct trace_probe *tp)
 	return 0;
 }
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes int kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_profile_func(struct kprobe *kp,
 					 struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 		     "profile buffer not large enough"))
-		return 0;
-
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
+		return;
 
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return;
 
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ip, 1, entry, size);
 
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
-
-	return 0;
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
 }
 
 /* Kretprobe profile handler */
-static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 		     "profile buffer not large enough"))
-		return 0;
-
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
+		return;
 
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kretprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return;
 
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
 
-	return 0;
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
 }
 
 static int probe_profile_enable(struct ftrace_event_call *call)
@@ -1388,7 +1299,7 @@ static void probe_profile_disable(struct ftrace_event_call *call)
 		disable_kprobe(&tp->rp.kp);
 	}
 }
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 
 static __kprobes
@@ -1398,10 +1309,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 
 	if (tp->flags & TP_FLAG_TRACE)
 		kprobe_trace_func(kp, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
 		kprobe_profile_func(kp, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
 
@@ -1412,10 +1323,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 
 	if (tp->flags & TP_FLAG_TRACE)
 		kretprobe_trace_func(ri, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
 		kretprobe_profile_func(ri, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
 
@@ -1446,7 +1357,7 @@ static int register_probe_event(struct trace_probe *tp)
 	call->regfunc = probe_event_enable;
 	call->unregfunc = probe_event_disable;
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 	call->profile_enable = probe_profile_enable;
 	call->profile_disable = probe_profile_disable;
 #endif
@@ -1507,28 +1418,67 @@ static int kprobe_trace_selftest_target(int a1, int a2, int a3,
 
 static __init int kprobe_trace_self_tests_init(void)
 {
-	int ret;
+	int ret, warn = 0;
 	int (*target)(int, int, int, int, int, int);
+	struct trace_probe *tp;
 
 	target = kprobe_trace_selftest_target;
 
 	pr_info("Testing kprobe tracing: ");
 
 	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
-				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
-	if (WARN_ON_ONCE(ret))
-		pr_warning("error enabling function entry\n");
+				  "$stack $stack0 +0($stack)");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on probing function entry.\n");
+		warn++;
+	} else {
+		/* Enable trace point */
+		tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
+		if (WARN_ON_ONCE(tp == NULL)) {
+			pr_warning("error on getting new probe.\n");
+			warn++;
+		} else
+			probe_event_enable(&tp->call);
+	}
 
 	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
 				  "$retval");
-	if (WARN_ON_ONCE(ret))
-		pr_warning("error enabling function return\n");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on probing function return.\n");
+		warn++;
+	} else {
+		/* Enable trace point */
+		tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
+		if (WARN_ON_ONCE(tp == NULL)) {
+			pr_warning("error on getting new probe.\n");
+			warn++;
+		} else
+			probe_event_enable(&tp->call);
+	}
+
+	if (warn)
+		goto end;
 
 	ret = target(1, 2, 3, 4, 5, 6);
 
-	cleanup_all_probes();
+	ret = command_trace_probe("-:testprobe");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on deleting a probe.\n");
+		warn++;
+	}
+
+	ret = command_trace_probe("-:testprobe2");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on deleting a probe.\n");
+		warn++;
+	}
 
-	pr_cont("OK\n");
+end:
+	cleanup_all_probes();
+	if (warn)
+		pr_cont("NG: Some tests are failed. Please check them.\n");
+	else
+		pr_cont("OK\n");
 	return 0;
 }
 
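The reworked selftest also shows the intended lifecycle: creating an event with command_trace_probe() does not enable it, so the test looks it up and enables it explicitly, and later deletes it by name with the "-:" form. A condensed sketch of that create/enable/delete sequence using the same file-local helpers; myprobe and do_sys_open are placeholders:

	/* Illustrative: create, enable, then delete a probe event. */
	static int example_probe_lifecycle(void)
	{
		struct trace_probe *tp;

		if (command_trace_probe("p:myprobe do_sys_open $stack0"))
			return -EINVAL;

		tp = find_probe_event("myprobe", KPROBE_EVENT_SYSTEM);
		if (!tp)
			return -ENOENT;
		probe_event_enable(&tp->call);

		/* "-:EVENT" removes a single named probe event */
		return command_trace_probe("-:myprobe");
	}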
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index a1834dda85f4..cba47d7935cc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -426,7 +426,7 @@ int __init init_ftrace_syscalls(void)
 }
 core_initcall(init_ftrace_syscalls);
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@@ -438,12 +438,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
 	unsigned long flags;
-	char *trace_buf;
-	char *raw_data;
 	int syscall_nr;
 	int rctx;
 	int size;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -462,37 +459,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
462 "profile buffer not large enough")) 459 "profile buffer not large enough"))
463 return; 460 return;
464 461
465 /* Protect the per cpu buffer, begin the rcu read side */ 462 rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
466 local_irq_save(flags); 463 sys_data->enter_event->id, &rctx, &flags);
467 464 if (!rec)
468 rctx = perf_swevent_get_recursion_context(); 465 return;
469 if (rctx < 0)
470 goto end_recursion;
471
472 cpu = smp_processor_id();
473
474 trace_buf = rcu_dereference(perf_trace_buf);
475
476 if (!trace_buf)
477 goto end;
478
479 raw_data = per_cpu_ptr(trace_buf, cpu);
480
481 /* zero the dead bytes from align to not leak stack to user */
482 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
483 466
484 rec = (struct syscall_trace_enter *) raw_data;
485 tracing_generic_entry_update(&rec->ent, 0, 0);
486 rec->ent.type = sys_data->enter_event->id;
487 rec->nr = syscall_nr; 467 rec->nr = syscall_nr;
488 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 468 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
489 (unsigned long *)&rec->args); 469 (unsigned long *)&rec->args);
490 perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size); 470 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
491
492end:
493 perf_swevent_put_recursion_context(rctx);
494end_recursion:
495 local_irq_restore(flags);
496} 471}
497 472
498int prof_sysenter_enable(struct ftrace_event_call *call) 473int prof_sysenter_enable(struct ftrace_event_call *call)
@@ -536,11 +511,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	struct syscall_trace_exit *rec;
 	unsigned long flags;
 	int syscall_nr;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 	int size;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -562,38 +534,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
562 "exit event has grown above profile buffer size")) 534 "exit event has grown above profile buffer size"))
563 return; 535 return;
564 536
565 /* Protect the per cpu buffer, begin the rcu read side */ 537 rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
566 local_irq_save(flags); 538 sys_data->exit_event->id, &rctx, &flags);
567 539 if (!rec)
568 rctx = perf_swevent_get_recursion_context(); 540 return;
569 if (rctx < 0)
570 goto end_recursion;
571
572 cpu = smp_processor_id();
573
574 trace_buf = rcu_dereference(perf_trace_buf);
575
576 if (!trace_buf)
577 goto end;
578
579 raw_data = per_cpu_ptr(trace_buf, cpu);
580
581 /* zero the dead bytes from align to not leak stack to user */
582 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
583
584 rec = (struct syscall_trace_exit *)raw_data;
585 541
586 tracing_generic_entry_update(&rec->ent, 0, 0);
587 rec->ent.type = sys_data->exit_event->id;
588 rec->nr = syscall_nr; 542 rec->nr = syscall_nr;
589 rec->ret = syscall_get_return_value(current, regs); 543 rec->ret = syscall_get_return_value(current, regs);
590 544
591 perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size); 545 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
592
593end:
594 perf_swevent_put_recursion_context(rctx);
595end_recursion:
596 local_irq_restore(flags);
597} 546}
598 547
599int prof_sysexit_enable(struct ftrace_event_call *call) 548int prof_sysexit_enable(struct ftrace_event_call *call)
@@ -631,6 +580,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
 	mutex_unlock(&syscall_trace_lock);
 }
 
-#endif
-
+#endif /* CONFIG_PERF_EVENTS */
 