Diffstat (limited to 'kernel/perf_event.c')

 kernel/perf_event.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 45 insertions(+), 23 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	u64 interrupts, freq;
 
 	spin_lock(&ctx->lock);
-	list_for_each_entry(event, &ctx->group_list, group_entry) {
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
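This hunk does more than swap a list: perf_ctx_adjust_freq() now visits every event on ctx->event_list rather than only group leaders on ctx->group_list, and it uses the RCU-safe iterator so the walk tolerates concurrent list mutation. A minimal sketch of that iteration pattern, assuming a hypothetical struct item and list head (the kernel code above additionally holds ctx->lock):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct item {
            int value;
            struct list_head link;
    };

    static int sum_items(struct list_head *head)
    {
            struct item *it;
            int sum = 0;

            rcu_read_lock();                        /* pin the RCU read side */
            list_for_each_entry_rcu(it, head, link)
                    sum += it->value;               /* safe vs. concurrent removal */
            rcu_read_unlock();

            return sum;
    }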
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	regs = task_pt_regs(current);
 
 	if (regs) {
-		if (perf_event_overflow(event, 0, &data, regs))
-			ret = HRTIMER_NORESTART;
+		if (!(event->attr.exclude_idle && current->pid == 0))
+			if (perf_event_overflow(event, 0, &data, regs))
+				ret = HRTIMER_NORESTART;
 	}
 
 	period = max_t(u64, 10000, event->hw.sample_period);
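This hunk makes the hrtimer-driven software events honor attr.exclude_idle: PID 0 is the per-CPU idle task, so when it is current the sample is dropped instead of being handed to perf_event_overflow(). The flag itself is just a bit in the perf_event_attr passed to the perf_event_open() syscall; a sketch of setting it from user space (hypothetical helper name, error handling elided):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Open a sampling cpu-clock event on one CPU, skipping the idle task. */
    static int open_cpu_clock_no_idle(int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_CPU_CLOCK; /* backed by the hrtimer above */
            attr.sample_period = 1000000;          /* one sample per millisecond */
            attr.exclude_idle = 1;                 /* the bit this hunk implements */

            /* pid = -1, cpu = cpu: any task on one CPU; no group, no flags */
            return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
    }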
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+	if (hwc->sample_period) {
+		u64 period;
+
+		if (hwc->remaining) {
+			if (hwc->remaining < 0)
+				period = 10000;
+			else
+				period = hwc->remaining;
+			hwc->remaining = 0;
+		} else {
+			period = max_t(u64, 10000, hwc->sample_period);
+		}
+		__hrtimer_start_range_ns(&hwc->hrtimer,
+				ns_to_ktime(period), 0,
+				HRTIMER_MODE_REL, 0);
+	}
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->sample_period) {
+		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+		hwc->remaining = ktime_to_ns(remaining);
+
+		hrtimer_cancel(&hwc->hrtimer);
+	}
+}
+
 /*
  * Software event: cpu wall time clock
  */
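Beyond removing duplication, the new helpers change behavior: perf_swevent_cancel_hrtimer() snapshots how much of the current period is left in hwc->remaining, and perf_swevent_start_hrtimer() re-arms the timer with that leftover instead of a fresh full period, so a disable/enable cycle resumes the sampling interval mid-flight. hrtimer_get_remaining() goes negative once the timer has already expired, which is why a negative leftover falls back to the 10000 ns floor used elsewhere in this file. A self-contained user-space analogue of the bookkeeping (hypothetical names, clock_gettime() standing in for hrtimers):

    #include <stdint.h>
    #include <time.h>

    #define MIN_PERIOD_NS 10000LL

    struct sampler {
            int64_t period_ns;      /* configured sampling period */
            int64_t remaining_ns;   /* leftover saved across a stop, 0 if none */
            int64_t deadline_ns;    /* absolute expiry of the armed period */
    };

    static int64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Arm the sampler, resuming a partially elapsed period if one was saved. */
    static void sampler_start(struct sampler *s)
    {
            int64_t period;

            if (s->remaining_ns) {
                    if (s->remaining_ns < 0)        /* period already overdue */
                            period = MIN_PERIOD_NS;
                    else
                            period = s->remaining_ns;
                    s->remaining_ns = 0;
            } else {
                    period = s->period_ns > MIN_PERIOD_NS ? s->period_ns
                                                          : MIN_PERIOD_NS;
            }
            s->deadline_ns = now_ns() + period;
    }

    /* Stop the sampler, remembering how much of the period was left. */
    static void sampler_stop(struct sampler *s)
    {
            s->remaining_ns = s->deadline_ns - now_ns();
    }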
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	cpu_clock_perf_event_update(event);
 }
 
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 	now = event->ctx->time;
 
 	atomic64_set(&hwc->prev_count, now);
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	task_clock_perf_event_update(event, event->ctx->time);
 
 }
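The last two hunks are the mechanical payoff: cpu_clock_perf_event_enable()/disable() and task_clock_perf_event_enable()/disable() drop their four open-coded copies of hrtimer setup and teardown in favor of the shared helpers, which accounts for most of the 23 deleted lines. As a side effect, both clock events now pick up the remaining-period handling, so being scheduled out and back in no longer resets their sampling interval.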