author     Ingo Molnar <mingo@kernel.org>    2015-08-12 05:39:19 -0400
committer  Ingo Molnar <mingo@kernel.org>    2015-08-12 05:39:19 -0400
commit     3d325bf0da91ca5d22f2525a72308dafd4fc0977 (patch)
tree       b6b5633538f4fd880dd26ca362f2d7aa4e9dd128
parent     f1d800bf615b84ca253af372d2dac8cdef743a20 (diff)
parent     d7a702f0b1033cf402fef65bd6395072738f0844 (diff)
Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c      | 23
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c  |  8
-rw-r--r--  kernel/events/core.c                        | 87
-rw-r--r--  kernel/events/ring_buffer.c                 | 10
4 files changed, 90 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a478e3c4cc3f..3f124d553c5a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2758,7 +2758,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
-			return NOTIFY_BAD;
+			goto err;
 	}
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2766,18 +2766,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
 		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
 		if (!cpuc->constraint_list)
-			return NOTIFY_BAD;
+			goto err_shared_regs;
 
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-		if (!cpuc->excl_cntrs) {
-			kfree(cpuc->constraint_list);
-			kfree(cpuc->shared_regs);
-			return NOTIFY_BAD;
-		}
+		if (!cpuc->excl_cntrs)
+			goto err_constraint_list;
+
 		cpuc->excl_thread_id = 0;
 	}
 
 	return NOTIFY_OK;
+
+err_constraint_list:
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
+
+err_shared_regs:
+	kfree(cpuc->shared_regs);
+	cpuc->shared_regs = NULL;
+
+err:
+	return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
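Note on the perf_event_intel.c hunks above: the open-coded cleanup in intel_pmu_cpu_prepare() becomes the kernel's usual goto-unwind style, and the freed pointers are cleared so a later teardown path cannot double-free them. A minimal userspace sketch of that pattern (illustrative names; not the kernel code itself):

/*
 * Goto-unwind sketch: each failure jumps to a label that frees
 * everything allocated so far and NULLs the pointers, so a later
 * release path sees them as already gone.
 */
#include <stdio.h>
#include <stdlib.h>

struct cpu_resources {
	int *shared_regs;
	int *constraint_list;
};

static int prepare(struct cpu_resources *res)
{
	res->shared_regs = calloc(4, sizeof(int));
	if (!res->shared_regs)
		goto err;

	res->constraint_list = calloc(16, sizeof(int));
	if (!res->constraint_list)
		goto err_shared_regs;

	return 0;		/* NOTIFY_OK in the patch */

err_shared_regs:
	free(res->shared_regs);
	res->shared_regs = NULL;
err:
	return -1;		/* NOTIFY_BAD in the patch */
}

int main(void)
{
	struct cpu_resources res = { 0 };

	printf("prepare: %d\n", prepare(&res));
	free(res.constraint_list);
	free(res.shared_regs);
	return 0;
}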
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 63eb68b73589..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
 	cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		intel_cqm_cpu_prepare(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		intel_cqm_cpu_exit(cpu);
 		break;
 	case CPU_STARTING:
+		intel_cqm_cpu_starting(cpu);
 		cqm_pick_event_reader(cpu);
 		break;
 	}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
 		goto out;
 
 	for_each_online_cpu(i) {
-		intel_cqm_cpu_prepare(i);
+		intel_cqm_cpu_starting(i);
 		cqm_pick_event_reader(i);
 	}
 
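The CQM hunks move the per-CPU setup from the CPU_UP_PREPARE notifier case to CPU_STARTING and rename the helper to match; the hunks suggest the setup reads state (e.g. cpu_data(cpu)) that is only valid once the CPU is actually coming up. A rough userspace sketch of the resulting dispatch order (stand-in types and printfs; the real code uses a notifier_block and per-CPU state):

/*
 * Dispatch-order sketch: setup now happens at STARTING, right
 * before a reader is picked for the new CPU.
 */
#include <stdio.h>

enum hotplug_action { CPU_DOWN_PREPARE, CPU_STARTING };

static void cqm_cpu_starting(unsigned int cpu)
{
	printf("cpu %u: init per-CPU CQM state\n", cpu);
}

static void cqm_cpu_exit(unsigned int cpu)
{
	printf("cpu %u: hand off reader duty\n", cpu);
}

static void cqm_pick_event_reader(unsigned int cpu)
{
	printf("cpu %u: pick event reader\n", cpu);
}

static void cqm_cpu_notifier(enum hotplug_action action, unsigned int cpu)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		cqm_cpu_exit(cpu);
		break;
	case CPU_STARTING:
		cqm_cpu_starting(cpu);	/* was done at UP_PREPARE */
		cqm_pick_event_reader(cpu);
		break;
	}
}

int main(void)
{
	cqm_cpu_notifier(CPU_STARTING, 0);
	cqm_cpu_notifier(CPU_DOWN_PREPARE, 0);
	return 0;
}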
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 77f9e5d0e2d1..ae16867670a9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3972,28 +3972,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -4012,11 +4005,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4754,12 +4789,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6221,7 +6264,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
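Two things happen in the core.c diff: PERF_EVENT_IOC_PERIOD is reworked so the period update runs where the event is active, via cpu_function_call()/task_function_call() as shown in the hunks, retrying if the context migrates in between; and fasync handling is routed through the new perf_event_fasync() helper, because inherited child events keep their fasync state on the parent. A small userspace sketch of the helper's parent-delegation idea (stand-in types, not the kernel structs):

/*
 * Parent-delegation sketch: events created by inheritance resolve
 * to the parent's fasync slot, so SIGIO state lives in exactly one
 * place no matter which event in the hierarchy fires.
 */
#include <stdio.h>

struct event {
	struct event *parent;	/* NULL for the original event */
	void *fasync;		/* stand-in for struct fasync_struct * */
};

static void **event_fasync(struct event *event)
{
	/* only the parent has fasync state */
	if (event->parent)
		event = event->parent;
	return &event->fasync;
}

int main(void)
{
	struct event parent = { 0 };
	struct event child = { .parent = &parent };

	/* both resolve to the same slot in the parent */
	printf("%d\n", event_fasync(&parent) == event_fasync(&child));
	return 0;
}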
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
-	rb->aux_nr_pages = 0;
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)
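The ring_buffer.c hunk guards the AUX page teardown with if (rb->aux_nr_pages) and zeroes the count under the same guard, so reaching __rb_free_aux() a second time becomes a no-op instead of a double free of the page array. A userspace sketch of that idempotent-teardown shape (illustrative names):

/*
 * Idempotent-teardown sketch: freeing is keyed off the page count,
 * and the count is zeroed inside the guard, so a repeated call
 * simply falls through.
 */
#include <stdlib.h>

struct ring_buffer {
	void **aux_pages;
	int aux_nr_pages;
};

static void rb_free_aux_pages(struct ring_buffer *rb)
{
	if (rb->aux_nr_pages) {
		for (int pg = 0; pg < rb->aux_nr_pages; pg++)
			free(rb->aux_pages[pg]);

		free(rb->aux_pages);
		rb->aux_pages = NULL;
		rb->aux_nr_pages = 0;
	}
}

int main(void)
{
	struct ring_buffer rb = { 0 };

	rb.aux_nr_pages = 2;
	rb.aux_pages = calloc(rb.aux_nr_pages, sizeof(void *));
	for (int pg = 0; pg < rb.aux_nr_pages; pg++)
		rb.aux_pages[pg] = malloc(4096);

	rb_free_aux_pages(&rb);
	rb_free_aux_pages(&rb);	/* second call is now a no-op */
	return 0;
}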