-rw-r--r--	include/trace/ftrace.h	110
-rw-r--r--	kernel/perf_counter.c	6
2 files changed, 88 insertions, 28 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..fec71f8dbc48 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,88 @@ static inline int ftrace_get_offsets_##call(			\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	extern void perf_tpcounter_event(int, u64, u64);
+ *	u64 __addr = 0, __count = 1;
+ *
+ *	<assign>  <-- here we expand the TP_perf_assign() macro
+ *
+ *	perf_tpcounter_event(event_<call>.id, __addr, __count);
+ * }
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	int ret = 0;
+ *
+ *	if (!atomic_inc_return(&event_call->profile_count))
+ *		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	if (atomic_add_negative(-1, &event_call->profile_count))
+ *		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...)
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto)				\
+{									\
+	extern void perf_tpcounter_event(int, u64, u64);		\
+	u64 __addr = 0, __count = 1;					\
+	{ assign; }							\
+	perf_tpcounter_event(event_##call.id, __addr, __count);	\
+}									\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
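
The TP_perf_assign()/__perf_addr()/__perf_count() hooks introduced above are written inside a TRACE_EVENT() definition, appended to the TP_fast_assign() block. A minimal sketch of a hypothetical event using them (foo_bar and its fields are illustrative, not part of this patch):

	TRACE_EVENT(foo_bar,

		TP_PROTO(unsigned long addr, unsigned long len),

		TP_ARGS(addr, len),

		TP_STRUCT__entry(
			__field(unsigned long, addr)
			__field(unsigned long, len)
		),

		TP_fast_assign(
			__entry->addr = addr;
			__entry->len = len;
		)
		TP_perf_assign(
			__perf_addr(addr);	/* address recorded in the perf sample */
			__perf_count(len);	/* advance the counter by len instead of 1 */
		),

		TP_printk("addr=%lx len=%lu", __entry->addr, __entry->len)
	);

Because the two macro passes flip which of TP_fast_assign()/TP_perf_assign() expands to its arguments and which expands to nothing, a single event definition feeds both the ftrace buffer and the perf handler.
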
@@ -447,28 +532,6 @@ static inline int ftrace_get_offsets_##call(			\
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +539,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +564,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +647,5 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
 #undef _TRACE_PROFILE_INIT
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 673c1aaf7332..52eb4b68d34f 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3703,17 +3703,17 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count)
 {
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = 0,
+		.addr = addr,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
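
With the widened perf_tpcounter_event() signature, each generated ftrace_profile_<call>() handler now passes a per-event address into the sample and a weight that scales the value added to the software counter. For the hypothetical foo_bar event sketched earlier, the stage-3 TRACE_EVENT() expansion reduces to roughly (illustrative expansion, not patch text):

	static void ftrace_profile_foo_bar(unsigned long addr, unsigned long len)
	{
		extern void perf_tpcounter_event(int, u64, u64);
		u64 __addr = 0, __count = 1;
		{ __addr = (addr); __count = (len); }	/* TP_perf_assign() body, expanded */
		perf_tpcounter_event(event_foo_bar.id, __addr, __count);
	}

Events that don't supply a TP_perf_assign() block keep the defaults (__addr = 0, __count = 1) and therefore behave exactly as before: one count per tracepoint hit.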