author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-18 19:52:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-18 19:52:46 -0400
commit     f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240
tree       09fc553c2fb6f527962048d139159dc139e04afc /kernel/trace
parent     c6b9e73f2fee8bb86058f296de808b326473456b
parent     dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
perf: Fix unexported generic perf_arch_fetch_caller_regs
perf record: Don't try to find buildids in a zero sized file
perf: export perf_trace_regs and perf_arch_fetch_caller_regs
perf, x86: Fix hw_perf_enable() event assignment
perf, ppc: Fix compile error due to new cpu notifiers
perf: Make the install relative to DESTDIR if specified
kprobes: Calculate the index correctly when freeing the out-of-line execution slot
perf tools: Fix sparse CPU numbering related bugs
perf_event: Fix oops triggered by cpu offline/online
perf: Drop the obsolete profile naming for trace events
perf: Take a hot regs snapshot for trace events
perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
perf/x86-64: Use frame pointer to walk on irq and process stacks
lockdep: Move lock events under lockdep recursion protection
perf report: Print the map table just after samples for which no map was found
perf report: Add multiple event support
perf session: Change perf_session post processing functions to take histogram tree
perf session: Add storage for seperating event types in report
perf session: Change add_hist_entry to take the tree root instead of session
perf record: Add ID and to recorded event data when recording multiple events
...
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Makefile                                                              |  2
-rw-r--r--  kernel/trace/trace_event_perf.c (renamed from kernel/trace/trace_event_profile.c)  | 50
-rw-r--r--  kernel/trace/trace_events.c                                                        |  2
-rw-r--r--  kernel/trace/trace_kprobe.c                                                        | 29
-rw-r--r--  kernel/trace/trace_syscalls.c                                                      | 72
5 files changed, 80 insertions(+), 75 deletions(-)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index c1cc3ab633de..81f691eb3a30 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,32 +1,36 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
- *
+ * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include "trace.h"
 
+DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
+
+EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int total_profile_count;
+static int total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
 	char *buf;
 	int ret = -ENOMEM;
 
-	if (event->profile_count++ > 0)
+	if (event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
@@ -40,35 +44,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable(event);
+	ret = event->perf_event_enable(event);
 	if (!ret) {
-		total_profile_count++;
+		total_ref_count++;
 		return 0;
 	}
 
 fail_buf_nmi:
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		free_percpu(perf_trace_buf_nmi);
 		free_percpu(perf_trace_buf);
 		perf_trace_buf_nmi = NULL;
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	event->profile_count--;
+	event->perf_refcount--;
 
 	return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable &&
+		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = ftrace_profile_enable_event(event);
+			ret = perf_trace_event_enable(event);
 			break;
 		}
 	}
@@ -77,16 +81,16 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (--event->profile_count > 0)
+	if (--event->perf_refcount > 0)
 		return;
 
-	event->profile_disable(event);
+	event->perf_event_disable(event);
 
-	if (!--total_profile_count) {
+	if (!--total_ref_count) {
 		buf = perf_trace_buf;
 		rcu_assign_pointer(perf_trace_buf, NULL);
 
@@ -104,14 +108,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	}
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
 	struct ftrace_event_call *event;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			ftrace_profile_disable_event(event);
+			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
 		}
@@ -119,8 +123,8 @@ void ftrace_profile_disable(int event_id)
 	mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
-					 int *rctxp, unsigned long *irq_flags)
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+					int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
 	char *trace_buf, *raw_data;
@@ -161,4 +165,4 @@ err_recursion:
 	local_irq_restore(*irq_flags);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..beab8bf2f310 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	trace_create_file("enable", 0644, call->dir, call,
 			  enable);
 
-	if (call->id && call->profile_enable)
+	if (call->id && call->perf_event_enable)
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 505c92273b1a..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 					     struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+			      irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
 	return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_profile_func(kp, regs);
+		kprobe_perf_func(kp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 		kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_profile_func(ri, regs);
+		kretprobe_perf_func(ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-	call->profile_enable = probe_profile_enable;
-	call->profile_disable = probe_profile_disable;
+	call->perf_event_enable = probe_perf_enable;
+	call->perf_event_disable = probe_perf_disable;
 #endif
 	call->data = tp;
 	ret = trace_add_event_call(call);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cba47d7935cc..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		"exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		"exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
 