about summary refs log tree commit diff stats
path: root/kernel/trace/trace_kprobe.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace_kprobe.c')
-rw-r--r-- kernel/trace/trace_kprobe.c | 50
1 file changed, 20 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3696476f307d..79ce6a2bd74f 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1208,11 +1208,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
1208 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1208 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1209 struct ftrace_event_call *call = &tp->call; 1209 struct ftrace_event_call *call = &tp->call;
1210 struct kprobe_trace_entry *entry; 1210 struct kprobe_trace_entry *entry;
1211 struct perf_trace_buf *trace_buf;
1212 struct trace_entry *ent; 1211 struct trace_entry *ent;
1213 int size, __size, i, pc, __cpu; 1212 int size, __size, i, pc, __cpu;
1214 unsigned long irq_flags; 1213 unsigned long irq_flags;
1214 char *trace_buf;
1215 char *raw_data; 1215 char *raw_data;
1216 int rctx;
1216 1217
1217 pc = preempt_count(); 1218 pc = preempt_count();
1218 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); 1219 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1227,6 +1228,11 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
1227 * This also protects the rcu read side 1228 * This also protects the rcu read side
1228 */ 1229 */
1229 local_irq_save(irq_flags); 1230 local_irq_save(irq_flags);
1231
1232 rctx = perf_swevent_get_recursion_context();
1233 if (rctx < 0)
1234 goto end_recursion;
1235
1230 __cpu = smp_processor_id(); 1236 __cpu = smp_processor_id();
1231 1237
1232 if (in_nmi()) 1238 if (in_nmi())
@@ -1237,18 +1243,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
1237 if (!trace_buf) 1243 if (!trace_buf)
1238 goto end; 1244 goto end;
1239 1245
1240 trace_buf = per_cpu_ptr(trace_buf, __cpu); 1246 raw_data = per_cpu_ptr(trace_buf, __cpu);
1241
1242 if (trace_buf->recursion++)
1243 goto end_recursion;
1244
1245 /*
1246 * Make recursion update visible before entering perf_tp_event
1247 * so that we protect from perf recursions.
1248 */
1249 barrier();
1250
1251 raw_data = trace_buf->buf;
1252 1247
1253 /* Zero dead bytes from alignment to avoid buffer leak to userspace */ 1248 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1254 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 1249 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1263,9 +1258,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
1263 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1258 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1264 perf_tp_event(call->id, entry->ip, 1, entry, size); 1259 perf_tp_event(call->id, entry->ip, 1, entry, size);
1265 1260
1266end_recursion:
1267 trace_buf->recursion--;
1268end: 1261end:
1262 perf_swevent_put_recursion_context(rctx);
1263end_recursion:
1269 local_irq_restore(irq_flags); 1264 local_irq_restore(irq_flags);
1270 1265
1271 return 0; 1266 return 0;
@@ -1278,11 +1273,12 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
1278 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1273 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1279 struct ftrace_event_call *call = &tp->call; 1274 struct ftrace_event_call *call = &tp->call;
1280 struct kretprobe_trace_entry *entry; 1275 struct kretprobe_trace_entry *entry;
1281 struct perf_trace_buf *trace_buf;
1282 struct trace_entry *ent; 1276 struct trace_entry *ent;
1283 int size, __size, i, pc, __cpu; 1277 int size, __size, i, pc, __cpu;
1284 unsigned long irq_flags; 1278 unsigned long irq_flags;
1279 char *trace_buf;
1285 char *raw_data; 1280 char *raw_data;
1281 int rctx;
1286 1282
1287 pc = preempt_count(); 1283 pc = preempt_count();
1288 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); 1284 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1297,6 +1293,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
1297 * This also protects the rcu read side 1293 * This also protects the rcu read side
1298 */ 1294 */
1299 local_irq_save(irq_flags); 1295 local_irq_save(irq_flags);
1296
1297 rctx = perf_swevent_get_recursion_context();
1298 if (rctx < 0)
1299 goto end_recursion;
1300
1300 __cpu = smp_processor_id(); 1301 __cpu = smp_processor_id();
1301 1302
1302 if (in_nmi()) 1303 if (in_nmi())
@@ -1307,18 +1308,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
1307 if (!trace_buf) 1308 if (!trace_buf)
1308 goto end; 1309 goto end;
1309 1310
1310 trace_buf = per_cpu_ptr(trace_buf, __cpu); 1311 raw_data = per_cpu_ptr(trace_buf, __cpu);
1311
1312 if (trace_buf->recursion++)
1313 goto end_recursion;
1314
1315 /*
1316 * Make recursion update visible before entering perf_tp_event
1317 * so that we protect from perf recursions.
1318 */
1319 barrier();
1320
1321 raw_data = trace_buf->buf;
1322 1312
1323 /* Zero dead bytes from alignment to avoid buffer leak to userspace */ 1313 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1324 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 1314 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1334,9 +1324,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
1334 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1324 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1335 perf_tp_event(call->id, entry->ret_ip, 1, entry, size); 1325 perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
1336 1326
1337end_recursion:
1338 trace_buf->recursion--;
1339end: 1327end:
1328 perf_swevent_put_recursion_context(rctx);
1329end_recursion:
1340 local_irq_restore(irq_flags); 1330 local_irq_restore(irq_flags);
1341 1331
1342 return 0; 1332 return 0;