aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_event.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--kernel/perf_event.c61
1 file changed, 41 insertions(+), 20 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index afb7ef3dbc44..33ff019f9aa6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3969,6 +3969,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3969 return ret; 3969 return ret;
3970} 3970}
3971 3971
/*
 * Arm the per-event hrtimer that drives software-event sampling.
 * Initializes the timer (CLOCK_MONOTONIC, relative mode), points it at
 * perf_swevent_hrtimer, and starts it only when the event actually has a
 * sample_period. A period saved by perf_swevent_cancel_hrtimer in
 * hwc->remaining is resumed and then cleared; otherwise the period is
 * clamped to at least 10000 ns.
 */
3972static void perf_swevent_start_hrtimer(struct perf_event *event)
3973{
3974	struct hw_perf_event *hwc = &event->hw;
3975
3976	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3977	hwc->hrtimer.function = perf_swevent_hrtimer;
3978	if (hwc->sample_period) {
3979		u64 period;
3980
		/* Resume the leftover interval saved by a prior cancel. */
3981		if (hwc->remaining) {
			/*
			 * NOTE(review): period is u64; this < 0 test is dead
			 * unless hwc->remaining is a signed type (s64) — the
			 * struct definition is not visible here, confirm. A
			 * negative remainder (timer already expired when it
			 * was cancelled) falls back to the 10000 ns floor.
			 */
3982			if (hwc->remaining < 0)
3983				period = 10000;
3984			else
3985				period = hwc->remaining;
3986			hwc->remaining = 0;
3987		} else {
			/* Fresh start: enforce a 10000 ns minimum period. */
3988			period = max_t(u64, 10000, hwc->sample_period);
3989		}
3990		__hrtimer_start_range_ns(&hwc->hrtimer,
3991				ns_to_ktime(period), 0,
3992				HRTIMER_MODE_REL, 0);
3993	}
3994}
3995
/*
 * Stop the sampling hrtimer for a software event. The unexpired time is
 * saved in hwc->remaining so perf_swevent_start_hrtimer can resume from
 * where the timer left off rather than restarting a full period. No-op
 * for events without a sample_period (timer was never armed).
 */
3996static void perf_swevent_cancel_hrtimer(struct perf_event *event)
3997{
3998	struct hw_perf_event *hwc = &event->hw;
3999
4000	if (hwc->sample_period) {
4001		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		/* May be negative if the timer had already expired. */
4002		hwc->remaining = ktime_to_ns(remaining);
4003
4004		hrtimer_cancel(&hwc->hrtimer);
4005	}
4006}
4007
3972/* 4008/*
3973 * Software event: cpu wall time clock 4009 * Software event: cpu wall time clock
3974 */ 4010 */
@@ -3991,22 +4027,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
3991 int cpu = raw_smp_processor_id(); 4027 int cpu = raw_smp_processor_id();
3992 4028
3993 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 4029 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3994 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4030 perf_swevent_start_hrtimer(event);
3995 hwc->hrtimer.function = perf_swevent_hrtimer;
3996 if (hwc->sample_period) {
3997 u64 period = max_t(u64, 10000, hwc->sample_period);
3998 __hrtimer_start_range_ns(&hwc->hrtimer,
3999 ns_to_ktime(period), 0,
4000 HRTIMER_MODE_REL, 0);
4001 }
4002 4031
4003 return 0; 4032 return 0;
4004} 4033}
4005 4034
4006static void cpu_clock_perf_event_disable(struct perf_event *event) 4035static void cpu_clock_perf_event_disable(struct perf_event *event)
4007{ 4036{
4008 if (event->hw.sample_period) 4037 perf_swevent_cancel_hrtimer(event);
4009 hrtimer_cancel(&event->hw.hrtimer);
4010 cpu_clock_perf_event_update(event); 4038 cpu_clock_perf_event_update(event);
4011} 4039}
4012 4040
@@ -4043,22 +4071,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
4043 now = event->ctx->time; 4071 now = event->ctx->time;
4044 4072
4045 atomic64_set(&hwc->prev_count, now); 4073 atomic64_set(&hwc->prev_count, now);
4046 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4074
4047 hwc->hrtimer.function = perf_swevent_hrtimer; 4075 perf_swevent_start_hrtimer(event);
4048 if (hwc->sample_period) {
4049 u64 period = max_t(u64, 10000, hwc->sample_period);
4050 __hrtimer_start_range_ns(&hwc->hrtimer,
4051 ns_to_ktime(period), 0,
4052 HRTIMER_MODE_REL, 0);
4053 }
4054 4076
4055 return 0; 4077 return 0;
4056} 4078}
4057 4079
4058static void task_clock_perf_event_disable(struct perf_event *event) 4080static void task_clock_perf_event_disable(struct perf_event *event)
4059{ 4081{
4060 if (event->hw.sample_period) 4082 perf_swevent_cancel_hrtimer(event);
4061 hrtimer_cancel(&event->hw.hrtimer);
4062 task_clock_perf_event_update(event, event->ctx->time); 4083 task_clock_perf_event_update(event, event->ctx->time);
4063 4084
4064} 4085}