author		Ingo Molnar <mingo@elte.hu>	2010-03-12 04:20:57 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-12 04:20:59 -0500
commit		937779db13fb6cb621e28d9ae0a6cf1d05b57d05 (patch)
tree		6c27402677c347c4dc01980de78c270630588847 /kernel
parent		6230f2c7ef01a69e2ba9370326572c287209d32a (diff)
parent		9f591fd76afdc0e5192e9ed00a36f8efc0b4dfe6 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: We want to queue up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kprobes.c	3
-rw-r--r--	kernel/lockdep.c	9
-rw-r--r--	kernel/perf_event.c	30
-rw-r--r--	kernel/trace/Makefile	2
-rw-r--r--	kernel/trace/trace_event_perf.c (renamed from kernel/trace/trace_event_profile.c)	48
-rw-r--r--	kernel/trace/trace_events.c	2
-rw-r--r--	kernel/trace/trace_kprobe.c	29
-rw-r--r--	kernel/trace/trace_syscalls.c	72
8 files changed, 104 insertions(+), 91 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fa034d29cf73..0ed46f3e51e9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 	struct kprobe_insn_page *kip;
 
 	list_for_each_entry(kip, &c->pages, list) {
-		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+		long idx = ((long)slot - (long)kip->insns) /
+			   (c->insn_size * sizeof(kprobe_opcode_t));
 		if (idx >= 0 && idx < slots_per_page(c)) {
 			WARN_ON(kip->slot_used[idx] != SLOT_USED);
 			if (dirty) {
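
The kprobes hunk fixes the slot-index calculation in __free_insn_slot(): c->insn_size counts kprobe_opcode_t units rather than bytes, so the byte offset of a slot within kip->insns must be divided by the slot size in bytes, not by the opcode count alone. A minimal standalone C sketch of the arithmetic (the sizes and values are hypothetical, chosen only to show the difference):

	#include <stdio.h>

	typedef unsigned int kprobe_opcode_t;		/* stand-in: 4 bytes */

	int main(void)
	{
		kprobe_opcode_t insns[64];		/* stand-in for kip->insns */
		int insn_size = 4;			/* slot size, in opcode units */
		kprobe_opcode_t *slot = &insns[8];	/* start of the third slot */

		/* Old, buggy form: divides a byte offset by an opcode count. */
		long bad = ((long)slot - (long)insns) / insn_size;

		/* Fixed form: convert the slot size to bytes first. */
		long idx = ((long)slot - (long)insns) /
			   (insn_size * sizeof(kprobe_opcode_t));

		printf("bad index: %ld, fixed index: %ld\n", bad, idx);	/* 8 vs 2 */
		return 0;
	}
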
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0c30d0455de1..65b5f5b7c298 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0);
 	current->lockdep_recursion = 0;
@@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
 	unsigned long flags;
 
-	trace_lock_release(lock, nested, ip);
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
+	trace_lock_release(lock, nested, ip);
 	__lock_release(lock, nested, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
@@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	trace_lock_contended(lock, ip);
-
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
+	trace_lock_contended(lock, ip);
 	__lock_contended(lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
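
All five lockdep hunks make the same move: each trace_lock_*() call now runs with current->lockdep_recursion set, so a tracer that itself takes locks (perf, for instance) can no longer recurse back into lockdep through the tracepoint. A rough compilable sketch of the recursion-guard pattern; current and lockdep_recursion mirror the kernel names, everything else is simplified for illustration:

	struct task { int lockdep_recursion; };
	static struct task task_storage;
	static struct task *current_task = &task_storage;

	static void trace_hook(void)
	{
		/* A tracer callback that may itself take locks. */
	}

	static void do_locking_work(void)
	{
		/* The real lock bookkeeping. */
	}

	void guarded_lock_op(void)
	{
		if (current_task->lockdep_recursion)
			return;			/* re-entry: bail out early */

		current_task->lockdep_recursion = 1;
		trace_hook();			/* safe: a recursive call returns above */
		do_locking_work();
		current_task->lockdep_recursion = 0;
	}
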
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3853d49c7d56..8bf61273c58b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2790,6 +2790,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+
 /*
  * Output
  */
@@ -4317,9 +4322,8 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size)
+		   int entry_size, struct pt_regs *regs)
 {
-	struct pt_regs *regs = get_irq_regs();
 	struct perf_sample_data data;
 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -4329,12 +4333,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	perf_sample_data_init(&data, addr);
 	data.raw = &raw;
 
-	if (!regs)
-		regs = task_pt_regs(current);
-
 	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			   &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
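
With these two hunks, perf_tp_event() stops guessing the interrupted register state via get_irq_regs()/task_pt_regs() and instead requires the caller to hand in a pt_regs snapshot; the __weak perf_arch_fetch_caller_regs() added above is the arch-overridable hook for taking that snapshot. A hypothetical call site under the new signature (kernel-context sketch only; it assumes the perf_trace_regs per-CPU area introduced in kernel/trace/trace_event_perf.c below):

	/* Sketch: capture a regs snapshot at the tracepoint and pass it
	 * down, instead of letting perf_tp_event() reconstruct one. */
	struct pt_regs *regs = &__get_cpu_var(perf_trace_regs);

	perf_arch_fetch_caller_regs(regs, _THIS_IP_, 1 /* skip frames */);
	perf_tp_event(event_id, addr, count, record, entry_size, regs);
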
@@ -4350,7 +4351,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4364,7 +4365,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
@@ -5371,12 +5372,22 @@ int perf_event_init_task(struct task_struct *child)
 	return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+	int cpu;
+	struct perf_cpu_context *cpuctx;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = &per_cpu(perf_cpu_context, cpu);
+		__perf_event_init_context(&cpuctx->ctx, NULL);
+	}
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
-	__perf_event_init_context(&cpuctx->ctx, NULL);
 
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
@@ -5442,6 +5453,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+	perf_event_init_all_cpus();
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
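
The last two perf_event.c hunks move context setup out of the hotplug path: perf_event_init_all_cpus() now initializes the context of every possible CPU once at boot, before the hotplug notifier fires, instead of initializing each CPU's context from CPU_UP_PREPARE. A small userspace sketch of the same "initialize every possible slot up front" pattern (NR_CPUS and the context struct are stand-ins):

	#define NR_CPUS 8

	struct cpu_ctx { int initialized; };
	static struct cpu_ctx ctx[NR_CPUS];

	/* Mirrors perf_event_init_all_cpus(): touch each possible CPU once
	 * at startup so later per-CPU bring-up can assume a valid context. */
	static void init_all_cpus(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			ctx[cpu].initialized = 1;
	}
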
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index f0d693005075..0709e4f75114 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,32 +1,34 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
- *
+ * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include "trace.h"
 
+DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
 
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int total_profile_count;
+static int total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
 	char *buf;
 	int ret = -ENOMEM;
 
-	if (event->profile_count++ > 0)
+	if (event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
@@ -40,35 +42,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable(event);
+	ret = event->perf_event_enable(event);
 	if (!ret) {
-		total_profile_count++;
+		total_ref_count++;
 		return 0;
 	}
 
 fail_buf_nmi:
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		free_percpu(perf_trace_buf_nmi);
 		free_percpu(perf_trace_buf);
 		perf_trace_buf_nmi = NULL;
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	event->profile_count--;
+	event->perf_refcount--;
 
 	return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable &&
+		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = ftrace_profile_enable_event(event);
+			ret = perf_trace_event_enable(event);
 			break;
 		}
 	}
@@ -77,16 +79,16 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (--event->profile_count > 0)
+	if (--event->perf_refcount > 0)
 		return;
 
-	event->profile_disable(event);
+	event->perf_event_disable(event);
 
-	if (!--total_profile_count) {
+	if (!--total_ref_count) {
 		buf = perf_trace_buf;
 		rcu_assign_pointer(perf_trace_buf, NULL);
 
@@ -104,14 +106,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	}
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
 	struct ftrace_event_call *event;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			ftrace_profile_disable_event(event);
+			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
 		}
@@ -119,8 +121,8 @@ void ftrace_profile_disable(int event_id)
 	mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
-					int *rctxp, unsigned long *irq_flags)
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+				       int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
 	char *trace_buf, *raw_data;
@@ -161,4 +163,4 @@ err_recursion:
 	local_irq_restore(*irq_flags);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
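
Beyond the file rename, this diff renames the bookkeeping to match what it actually is: a two-level reference count, where the per-event perf_refcount decides when to call the event's own enable/disable hooks, and the global total_ref_count decides when the shared perf trace buffers are allocated and freed. A compilable userspace sketch of that scheme (names here are illustrative):

	#include <stdlib.h>

	static char *shared_buf;	/* plays the role of perf_trace_buf */
	static int total_refs;		/* plays the role of total_ref_count */

	struct event { int refs; };	/* per-event perf_refcount */

	int event_enable(struct event *ev)
	{
		if (ev->refs++ > 0)
			return 0;		/* event already enabled */

		if (!total_refs) {
			shared_buf = malloc(4096);	/* first user allocates */
			if (!shared_buf) {
				ev->refs--;
				return -1;	/* -ENOMEM in the kernel code */
			}
		}
		total_refs++;
		return 0;
	}

	void event_disable(struct event *ev)
	{
		if (--ev->refs > 0)
			return;			/* event still has users */

		if (!--total_refs) {		/* last user frees */
			free(shared_buf);
			shared_buf = NULL;
		}
	}
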
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..beab8bf2f310 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	trace_create_file("enable", 0644, call->dir, call,
 			  enable);
 
-	if (call->id && call->profile_enable)
+	if (call->id && call->perf_event_enable)
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 505c92273b1a..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+			      irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
 	return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_profile_func(kp, regs);
+		kprobe_perf_func(kp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 		kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_profile_func(ri, regs);
+		kretprobe_perf_func(ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-	call->profile_enable = probe_profile_enable;
-	call->profile_disable = probe_profile_disable;
+	call->perf_event_enable = probe_perf_enable;
+	call->perf_event_disable = probe_perf_disable;
 #endif
 	call->data = tp;
 	ret = trace_add_event_call(call);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cba47d7935cc..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
 
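
The syscall side follows the same renaming and keeps its existing gating structure: a global refcount registers the sys_enter/sys_exit tracepoint on first use and unregisters it on last release, while a per-syscall bitmap keeps the hot path down to a single test_bit() check. A self-contained userspace sketch of that bitmap-plus-refcount gating (NR_SYSCALLS and the register/unregister stubs are stand-ins):

	#include <stdio.h>

	#define NR_SYSCALLS	512
	#define BITS_PER_LONG	(8 * sizeof(long))

	static unsigned long enabled[NR_SYSCALLS / BITS_PER_LONG];
	static int refcount;

	static void set_bit_(int nr)
	{
		enabled[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
	}

	static void clear_bit_(int nr)
	{
		enabled[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
	}

	static int test_bit_(int nr)
	{
		return !!(enabled[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
	}

	void perf_enable_syscall(int nr)
	{
		if (!refcount)
			puts("register handler");	/* register_trace_sys_enter() */
		set_bit_(nr);
		refcount++;
	}

	void perf_disable_syscall(int nr)
	{
		refcount--;
		clear_bit_(nr);
		if (!refcount)
			puts("unregister handler");	/* unregister_trace_sys_enter() */
	}

	void syscall_hook(int nr)
	{
		if (!test_bit_(nr))
			return;		/* fast path: this syscall is not traced */
		/* ...prepare and submit the perf trace record... */
	}
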