Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--   kernel/perf_event.c   91
1 file changed, 58 insertions(+), 33 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 98dc56b2ebe4..3852e2656bb0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1357,7 +1357,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
         u64 interrupts, freq;
 
         spin_lock(&ctx->lock);
-        list_for_each_entry(event, &ctx->group_list, group_entry) {
+        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                 if (event->state != PERF_EVENT_STATE_ACTIVE)
                         continue;
 
@@ -2696,20 +2696,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 static void perf_output_lock(struct perf_output_handle *handle)
 {
         struct perf_mmap_data *data = handle->data;
-        int cpu;
+        int cur, cpu = get_cpu();
 
         handle->locked = 0;
 
-        local_irq_save(handle->flags);
-        cpu = smp_processor_id();
-
-        if (in_nmi() && atomic_read(&data->lock) == cpu)
-                return;
+        for (;;) {
+                cur = atomic_cmpxchg(&data->lock, -1, cpu);
+                if (cur == -1) {
+                        handle->locked = 1;
+                        break;
+                }
+                if (cur == cpu)
+                        break;
 
-        while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
                 cpu_relax();
-
-        handle->locked = 1;
+        }
 }
 
 static void perf_output_unlock(struct perf_output_handle *handle)
@@ -2755,7 +2756,7 @@ again:
         if (atomic_xchg(&data->wakeup, 0))
                 perf_output_wakeup(handle);
 out:
-        local_irq_restore(handle->flags);
+        put_cpu();
 }
 
 void perf_output_copy(struct perf_output_handle *handle,
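
Note: the rewritten perf_output_lock() drops the IRQ-disable plus in_nmi() special case in favour of one cmpxchg loop. The lock word holds -1 when free and the owning CPU id otherwise, so an NMI that interrupts the owner sees cur == cpu and falls through without spinning on itself; handle->locked records whether this frame is the one that must unlock. A minimal userspace sketch of the same nest-aware pattern using C11 atomics (illustrative only; nest_lock and its helpers are invented names, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct nest_lock {
        atomic_int owner;               /* -1 when free, else owner id */
};

static void nest_lock_init(struct nest_lock *l)
{
        atomic_init(&l->owner, -1);
}

/* Returns true if this call took the lock and must therefore release it. */
static bool nest_lock_acquire(struct nest_lock *l, int id)
{
        for (;;) {
                int cur = -1;

                if (atomic_compare_exchange_strong(&l->owner, &cur, id))
                        return true;    /* lock was free: we own it now */
                if (cur == id)
                        return false;   /* re-entry: outer frame unlocks */
                /* held by someone else: spin until released */
        }
}

static void nest_lock_release(struct nest_lock *l, bool locked)
{
        if (locked)
                atomic_store(&l->owner, -1);
}

An NMI-style re-entry corresponds to calling nest_lock_acquire() again with the same id before the matching release; only the outermost call stores -1 back.
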
@@ -3998,8 +3999,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
         regs = task_pt_regs(current);
 
         if (regs) {
-                if (perf_event_overflow(event, 0, &data, regs))
-                        ret = HRTIMER_NORESTART;
+                if (!(event->attr.exclude_idle && current->pid == 0))
+                        if (perf_event_overflow(event, 0, &data, regs))
+                                ret = HRTIMER_NORESTART;
         }
 
         period = max_t(u64, 10000, event->hw.sample_period);
@@ -4008,6 +4010,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
         return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+        hwc->hrtimer.function = perf_swevent_hrtimer;
+        if (hwc->sample_period) {
+                u64 period;
+
+                if (hwc->remaining) {
+                        if (hwc->remaining < 0)
+                                period = 10000;
+                        else
+                                period = hwc->remaining;
+                        hwc->remaining = 0;
+                } else {
+                        period = max_t(u64, 10000, hwc->sample_period);
+                }
+                __hrtimer_start_range_ns(&hwc->hrtimer,
+                                ns_to_ktime(period), 0,
+                                HRTIMER_MODE_REL, 0);
+        }
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (hwc->sample_period) {
+                ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+                hwc->remaining = ktime_to_ns(remaining);
+
+                hrtimer_cancel(&hwc->hrtimer);
+        }
+}
+
 /*
  * Software event: cpu wall time clock
  */
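
Note: the two new helpers factor out the duplicated hrtimer setup and additionally preserve hwc->remaining across a disable/enable pair, so a sampling timer resumes with whatever was left of its period rather than a full fresh one, falling back to the 10us floor used elsewhere in this file when the timer had already expired. A rough userspace sketch of that save/restore idea (illustrative only; resumable_timer and its helpers are invented names, not kernel API):

#include <stdint.h>
#include <time.h>

#define MIN_PERIOD_NS 10000ULL          /* same 10us floor as the kernel code */

struct resumable_timer {
        uint64_t period_ns;             /* nominal sampling period */
        uint64_t deadline_ns;           /* absolute expiry, 0 when stopped */
        int64_t  remaining_ns;          /* stashed remainder, 0 when none */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Arm the timer: prefer a stashed remainder over a full period. */
static void timer_start(struct resumable_timer *t)
{
        uint64_t period;

        if (t->remaining_ns) {
                /* negative remainder: expired while cancelled */
                period = t->remaining_ns < 0 ? MIN_PERIOD_NS
                                             : (uint64_t)t->remaining_ns;
                t->remaining_ns = 0;
        } else {
                period = t->period_ns > MIN_PERIOD_NS ? t->period_ns
                                                      : MIN_PERIOD_NS;
        }
        t->deadline_ns = now_ns() + period;
}

/* Stop the timer, remembering how much of the period was left. */
static void timer_cancel(struct resumable_timer *t)
{
        t->remaining_ns = (int64_t)(t->deadline_ns - now_ns());
        t->deadline_ns = 0;
}
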
@@ -4030,22 +4068,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
         int cpu = raw_smp_processor_id();
 
         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-        hwc->hrtimer.function = perf_swevent_hrtimer;
-        if (hwc->sample_period) {
-                u64 period = max_t(u64, 10000, hwc->sample_period);
-                __hrtimer_start_range_ns(&hwc->hrtimer,
-                                ns_to_ktime(period), 0,
-                                HRTIMER_MODE_REL, 0);
-        }
+        perf_swevent_start_hrtimer(event);
 
         return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-        if (event->hw.sample_period)
-                hrtimer_cancel(&event->hw.hrtimer);
+        perf_swevent_cancel_hrtimer(event);
         cpu_clock_perf_event_update(event);
 }
 
@@ -4082,22 +4112,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
         now = event->ctx->time;
 
         atomic64_set(&hwc->prev_count, now);
-        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-        hwc->hrtimer.function = perf_swevent_hrtimer;
-        if (hwc->sample_period) {
-                u64 period = max_t(u64, 10000, hwc->sample_period);
-                __hrtimer_start_range_ns(&hwc->hrtimer,
-                                ns_to_ktime(period), 0,
-                                HRTIMER_MODE_REL, 0);
-        }
+
+        perf_swevent_start_hrtimer(event);
 
         return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-        if (event->hw.sample_period)
-                hrtimer_cancel(&event->hw.hrtimer);
+        perf_swevent_cancel_hrtimer(event);
         task_clock_perf_event_update(event, event->ctx->time);
 
 }
@@ -4319,6 +4342,8 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
         case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
         case PERF_COUNT_SW_CONTEXT_SWITCHES:
         case PERF_COUNT_SW_CPU_MIGRATIONS:
+        case PERF_COUNT_SW_ALIGNMENT_FAULTS:
+        case PERF_COUNT_SW_EMULATION_FAULTS:
                 if (!event->parent) {
                         atomic_inc(&perf_swevent_enabled[event_id]);
                         event->destroy = sw_perf_event_destroy;
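
Note: with these two case labels wired up, PERF_COUNT_SW_ALIGNMENT_FAULTS and PERF_COUNT_SW_EMULATION_FAULTS become usable as ordinary software counters; attr.exclude_idle, tested by the perf_swevent_hrtimer change above, is set through the same struct. A minimal userspace sketch of opening one of them via perf_event_open(2), assuming a kernel with this patch applied (illustrative only; error handling trimmed):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

        /* count alignment faults of the calling task on any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* ... workload under measurement would run here ... */

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("alignment faults: %llu\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}

The count is only nonzero on architectures that fix up unaligned accesses in the kernel (e.g. powerpc or arm); on x86 the hardware handles them and this counter stays at zero.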