 include/linux/perf_event.h | 27 +++++++++++++++++++++++++--
 kernel/perf_event.c        | 24 +++++++++---------------
 2 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 97965fac55fe..7f0e7f52af8b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -487,6 +487,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -895,8 +896,30 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	JUMP_LABEL(&perf_task_events, have_events);
+	return;
+
+have_events:
+	__perf_event_task_sched_in(task);
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	JUMP_LABEL(&perf_task_events, have_events);
+	return;
+
+have_events:
+	__perf_event_task_sched_out(task, next);
+}
+
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
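The two static inline wrappers above are the point of the patch: JUMP_LABEL() plants a no-op in the scheduler fast path that is live-patched into a jump to the have_events slow path only while perf_task_events is non-zero, so a kernel with no per-task perf events pays essentially nothing at context switch. As a rough sketch of the semantics, on architectures without runtime code patching the 2010-era <linux/jump_label_ref.h> is assumed to fall back to an ordinary test of the atomic key, roughly:

/*
 * Sketch of the !HAVE_JUMP_LABEL fallback (assumed, not the
 * verbatim header): the same atomic_t that jump_label_inc() and
 * jump_label_dec() use as a reference count is simply read and
 * tested on every call instead of being patched into the code.
 */
#define JUMP_LABEL(key, label)					\
do {								\
	if (unlikely(atomic_read(key)))				\
		goto label;					\
} while (0)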
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 856e20baf13f..f7febb02ab97 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -34,7 +34,7 @@
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -1311,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+				 struct task_struct *next)
 {
 	int ctxn;
 
@@ -1340,14 +1340,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 /*
  * Called with IRQs disabled
  */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-	task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
-/*
- * Called with IRQs disabled
- */
 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			      enum event_type_t event_type)
 {
@@ -1494,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2216,7 +2208,8 @@ static void free_event(struct perf_event *event)
 	irq_work_sync(&event->pending);
 
 	if (!event->parent) {
-		atomic_dec(&nr_events);
+		if (event->attach_state & PERF_ATTACH_TASK)
+			jump_label_dec(&perf_task_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -5354,7 +5347,8 @@ done:
 	event->pmu = pmu;
 
 	if (!event->parent) {
-		atomic_inc(&nr_events);
+		if (event->attach_state & PERF_ATTACH_TASK)
+			jump_label_inc(&perf_task_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
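In the two hunks above, perf_task_events becomes a reference count over per-task events: perf_event_alloc() takes a reference for each non-inherited event with PERF_ATTACH_TASK set, and free_event() drops it, so the scheduler hooks are patched in only while at least one such event exists. A sketch of what the inc/dec helpers are assumed to do (per the jump_label_ref.h of this era; jump_label_enable()/jump_label_disable() are the code-patching entry points):

/*
 * Sketch: the 0 -> 1 transition patches every JUMP_LABEL() site
 * keyed on @key from a no-op into a jump; 1 -> 0 restores the
 * no-ops. Intermediate transitions only touch the counter.
 */
static inline void jump_label_inc(atomic_t *key)
{
	if (atomic_add_return(1, key) == 1)
		jump_label_enable(key);
}

static inline void jump_label_dec(atomic_t *key)
{
	if (atomic_dec_and_test(key))
		jump_label_disable(key);
}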
@@ -5849,7 +5843,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * our context.
 	 */
 	child_ctx = child->perf_event_ctxp[ctxn];
-	__perf_event_task_sched_out(child_ctx);
+	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
 	 * Take the context lock here so that if find_get_context is
