author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-02-15 06:41:46 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-02-16 07:30:57 -0500
commit		ba3dd36c6775264ee6e7354ba1aabcd6e86d7298 (patch)
tree		f48441ff3179a91b4acde6c785726a34e6dc856a
parent		163ec4354a5135c6c38c3f4a9b46a31900ebdf48 (diff)
perf: Optimize hrtimer events
There is no need to re-initialize the hrtimer every time we start it, so don't do that (shaves a few cycles). Also, since hrtimers run at a fixed rate (nanoseconds), we can pre-compute the period matching the desired tick frequency. This avoids going through the whole adaptive frequency feedback logic (shaves another few cycles).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1297448589.5226.47.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
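As a quick illustration of the static freq->period mapping described above: because hrtimers always tick in nanoseconds, a requested sampling frequency converts to a constant period once, up front, with no runtime feedback. The following is a rough user-space sketch, not the kernel code path; NSEC_PER_SEC and freq_to_period_ns() are defined locally for the example.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned long long freq_to_period_ns(long freq)
{
	/* e.g. 1000 Hz -> 1000000 ns, 4000 Hz -> 250000 ns */
	return NSEC_PER_SEC / freq;
}

int main(void)
{
	printf("1000 Hz -> %llu ns\n", freq_to_period_ns(1000));
	printf("4000 Hz -> %llu ns\n", freq_to_period_ns(4000));
	return 0;
}

This is what perf_swevent_init_hrtimer() in the diff below does with event->attr.sample_freq, after which it clears attr.freq so the adaptive period-adjust path is never entered.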
-rw-r--r--	kernel/perf_event.c	35
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e03be08d0ddf..a0a6987fabc4 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -5602,6 +5602,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	u64 period;
 
 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return HRTIMER_NORESTART;
+
 	event->pmu->read(event);
 
 	perf_sample_data_init(&data, 0);
@@ -5628,9 +5632,6 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
 	if (!is_sampling_event(event))
 		return;
 
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-
 	period = local64_read(&hwc->period_left);
 	if (period) {
 		if (period < 0)
@@ -5657,6 +5658,30 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 	}
 }
 
+static void perf_swevent_init_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!is_sampling_event(event))
+		return;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+
+	/*
+	 * Since hrtimers have a fixed rate, we can do a static freq->period
+	 * mapping and avoid the whole period adjust feedback stuff.
+	 */
+	if (event->attr.freq) {
+		long freq = event->attr.sample_freq;
+
+		event->attr.sample_period = NSEC_PER_SEC / freq;
+		hwc->sample_period = event->attr.sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+		event->attr.freq = 0;
+	}
+}
+
 /*
  * Software event: cpu wall time clock
  */
@@ -5709,6 +5734,8 @@ static int cpu_clock_event_init(struct perf_event *event)
 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
 		return -ENOENT;
 
+	perf_swevent_init_hrtimer(event);
+
 	return 0;
 }
 
@@ -5787,6 +5814,8 @@ static int task_clock_event_init(struct perf_event *event)
 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
 		return -ENOENT;
 
+	perf_swevent_init_hrtimer(event);
+
 	return 0;
 }
 
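As a design note on the other optimization, the init-once/arm-per-start split: the same pattern exists in the user-space POSIX timer API, which makes for a convenient analogue. The sketch below is illustrative user-space code under that assumption, not the kernel's API; timer_create() stands in for the one-time setup done in perf_swevent_init_hrtimer(), and timer_settime() for the per-start arming that stays on the hot path.

/* Build: cc sketch.c -o sketch (add -lrt on older glibc). */
#include <signal.h>
#include <time.h>
#include <unistd.h>

static void on_tick(union sigval sv)
{
	(void)sv;
	/* per-tick sampling work would go here, cf. perf_swevent_hrtimer() */
}

int main(void)
{
	timer_t t;
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = on_tick,
	};

	/* One-time setup of clock and callback, like hrtimer_init() plus
	 * setting hwc->hrtimer.function in perf_swevent_init_hrtimer(). */
	if (timer_create(CLOCK_MONOTONIC, &sev, &t))
		return 1;

	/* Per-start arming only: a fixed 250000 ns period (4000 Hz),
	 * i.e. NSEC_PER_SEC / freq computed once. */
	struct itimerspec its = {
		.it_value    = { .tv_nsec = 250000 },
		.it_interval = { .tv_nsec = 250000 },
	};
	if (timer_settime(t, 0, &its, NULL))
		return 1;

	sleep(1);	/* let a few ticks fire */
	timer_delete(t);
	return 0;
}

The point mirrors the patch: invariant setup stays out of the frequently executed start path, and re-arming never requires re-creating the timer.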