| author | Masami Hiramatsu <mhiramat@redhat.com> | 2009-09-25 14:20:12 -0400 |
| --- | --- | --- |
| committer | Frederic Weisbecker <fweisbec@gmail.com> | 2009-10-02 20:21:39 -0400 |
| commit | a1a138d05fa060ac4238c19a1e890aacc25ed3ba | |
| tree | d91832365614c86e4e9509e539acb9f6bdced691 | |
| parent | d7a4b414eed51f1653bb05ebe84122bf9a7ae18b | |
tracing/kprobes: Use global event perf buffers in kprobe tracer
Use the new per-cpu global event buffers instead of on-stack buffers
in the kprobe tracer when tracing through perf.
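The code this patch removes built each record in a variable-length array on the kernel stack, which is risky in probe context where stack space is scarce. Below is a minimal, self-contained userspace analogue of the new scheme; the names, the record layout, and the buffer sizes are made up for illustration, and only the per-cpu-buffer idea, the sizing, and the padding-zeroing mirror the patch:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS          4
#define MAX_PROFILE_SIZE 256           /* stand-in for FTRACE_MAX_PROFILE_SIZE */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Toy record; the kernel's equivalent is struct kprobe_trace_entry. */
struct sample {
	uint32_t type;                 /* event id */
	uint32_t nargs;
	uint64_t ip;                   /* probe address */
	uint64_t args[];
};

/* One preallocated buffer per CPU, shared by every event: the role
 * trace_profile_buf plays in the patch.  No per-event stack array. */
static _Alignas(8) char profile_buf[NR_CPUS][MAX_PROFILE_SIZE];

static int record_event(int cpu, uint32_t type, uint64_t ip,
			const uint64_t *args, uint32_t nargs)
{
	size_t __size = sizeof(struct sample) + nargs * sizeof(uint64_t);
	/* The patch's sizing: keep the record plus perf's u32 size header
	 * u64-aligned, then subtract the header from our own count. */
	size_t size = ALIGN(__size + sizeof(uint32_t), sizeof(uint64_t))
		      - sizeof(uint32_t);

	if (size > MAX_PROFILE_SIZE)   /* kernel: WARN_ONCE and bail out */
		return -1;

	char *raw_data = profile_buf[cpu];
	/* Zero dead bytes from alignment before filling in the fields,
	 * as the patch does, so stale buffer contents never leak out. */
	*(uint64_t *)(raw_data + size - sizeof(uint64_t)) = 0;

	struct sample *entry = (struct sample *)raw_data;
	entry->type  = type;
	entry->nargs = nargs;
	entry->ip    = ip;
	memcpy(entry->args, args, nargs * sizeof(uint64_t));
	printf("cpu%d: event %u, %u args, %zu-byte record (%zu padding)\n",
	       cpu, type, nargs, size, size - __size);
	return 0;
}

int main(void)
{
	uint64_t args[3] = { 1, 2, 3 };
	return record_event(0, 42, 0xffffffff81000000ULL, args, 3);
}
```

In the real handler the buffer is also split into an NMI and a non-NMI variant and is reached under disabled interrupts, as the diff below shows.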
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Frank Ch. Eigler <fche@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <20090925182011.10157.60140.stgit@omoto>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
 kernel/trace/trace_kprobe.c | 115 ++++++++++++++++++++++++++---------------
 1 file changed, 73 insertions(+), 42 deletions(-)
```diff
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 09cba270392d..97309d4714f7 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1149,35 +1149,49 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	int size, __size, i, pc;
+	struct trace_entry *ent;
+	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *raw_data;
 
-	local_save_flags(irq_flags);
 	pc = preempt_count();
-
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		     "profile buffer not large enough"))
+		return 0;
 
-	do {
-		char raw_data[size];
-		struct trace_entry *ent;
-		/*
-		 * Zero dead bytes from alignment to avoid stack leak
-		 * to userspace
-		 */
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-		entry = (struct kprobe_trace_entry *)raw_data;
-		ent = &entry->ent;
+	/*
+	 * Protect the non nmi buffer
+	 * This also protects the rcu read side
+	 */
+	local_irq_save(irq_flags);
+	__cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
 
-		tracing_generic_entry_update(ent, irq_flags, pc);
-		ent->type = call->id;
-		entry->nargs = tp->nr_args;
-		entry->ip = (unsigned long)kp->addr;
-		for (i = 0; i < tp->nr_args; i++)
-			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-		perf_tp_event(call->id, entry->ip, 1, entry, size);
-	} while (0);
+	raw_data = per_cpu_ptr(raw_data, __cpu);
+	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	entry = (struct kprobe_trace_entry *)raw_data;
+	ent = &entry->ent;
+
+	tracing_generic_entry_update(ent, irq_flags, pc);
+	ent->type = call->id;
+	entry->nargs = tp->nr_args;
+	entry->ip = (unsigned long)kp->addr;
+	for (i = 0; i < tp->nr_args; i++)
+		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+	perf_tp_event(call->id, entry->ip, 1, entry, size);
+end:
+	local_irq_restore(irq_flags);
 	return 0;
 }
 
```
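The kretprobe handler in the next hunk receives exactly the same conversion, so both functions now open-code the same buffer-selection step. A sketch of how that shared step could be factored out follows; this helper is hypothetical, not part of the patch (the real code inlines it in both handlers):

```c
/*
 * Hypothetical helper (not in the patch).  The caller must have local
 * interrupts disabled: that pins the task to one CPU for per_cpu_ptr()
 * and, because the buffers are only freed after an RCU grace period,
 * also covers the rcu_dereference() read side -- which is what the
 * "Protect the non nmi buffer" comment in the diff refers to.
 */
static char *get_profile_buf(int cpu)
{
	char *buf;

	if (in_nmi())
		buf = rcu_dereference(trace_profile_buf_nmi);
	else
		buf = rcu_dereference(trace_profile_buf);

	return buf ? per_cpu_ptr(buf, cpu) : NULL;
}
```

The separate NMI buffer exists because an NMI can interrupt a handler that is already writing into the regular per-cpu buffer; giving NMI context its own buffer avoids corrupting a half-built record.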
```diff
@@ -1188,33 +1202,50 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	int size, __size, i, pc;
+	struct trace_entry *ent;
+	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *raw_data;
 
-	local_save_flags(irq_flags);
 	pc = preempt_count();
-
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		     "profile buffer not large enough"))
+		return 0;
+
+	/*
+	 * Protect the non nmi buffer
+	 * This also protects the rcu read side
+	 */
+	local_irq_save(irq_flags);
+	__cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
+
+	raw_data = per_cpu_ptr(raw_data, __cpu);
+	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	entry = (struct kretprobe_trace_entry *)raw_data;
+	ent = &entry->ent;
 
-	do {
-		char raw_data[size];
-		struct trace_entry *ent;
-
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-		entry = (struct kretprobe_trace_entry *)raw_data;
-		ent = &entry->ent;
-
-		tracing_generic_entry_update(ent, irq_flags, pc);
-		ent->type = call->id;
-		entry->nargs = tp->nr_args;
-		entry->func = (unsigned long)tp->rp.kp.addr;
-		entry->ret_ip = (unsigned long)ri->ret_addr;
-		for (i = 0; i < tp->nr_args; i++)
-			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-		perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-	} while (0);
+	tracing_generic_entry_update(ent, irq_flags, pc);
+	ent->type = call->id;
+	entry->nargs = tp->nr_args;
+	entry->func = (unsigned long)tp->rp.kp.addr;
+	entry->ret_ip = (unsigned long)ri->ret_addr;
+	for (i = 0; i < tp->nr_args; i++)
+		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
+end:
+	local_irq_restore(irq_flags);
 	return 0;
 }
 
```
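To make the ALIGN/size dance concrete: perf's raw-sample format prefixes each record with a u32 size field, and the combined blob must stay u64-aligned, so the code pads the entry as if the header were attached and then drops the header from its own count. A worked example with an assumed entry size (the real __size depends on nr_args and the entry layout):

```c
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* Assumed value of SIZEOF_KPROBE_TRACE_ENTRY(nr_args). */
	unsigned long __size = 58;
	unsigned long size = ALIGN(__size + sizeof(unsigned int),
				   sizeof(unsigned long long))
			     - sizeof(unsigned int);

	/* ALIGN(58 + 4, 8) - 4 = 60: two "dead" padding bytes sit past
	 * the real entry, and the 0ULL store in the handlers wipes them
	 * before the record can reach userspace. */
	printf("__size=%lu size=%lu dead=%lu\n", __size, size, size - __size);
	return 0;
}
```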