author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-11-20 16:19:44 -0500
committer  Ingo Molnar <mingo@elte.hu>                2009-11-21 08:11:35 -0500
commit     0cff784ae41cc125368ae77f1c01328ae2fdc6b3 (patch)
tree       54e6f387d945d724c00bb33118ef138831a68d10 /kernel
parent     453f19eea7dbad837425e9b07d84568d14898794 (diff)
perf: Optimize some swcounter attr.sample_period==1 paths
Avoid the rather expensive perf_swevent_set_period() if we know
we have to sample every single event anyway.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091120212508.299508332@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
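
The fast path is easiest to see in isolation. Below is a small userspace model, not kernel code: "struct hw_model", set_period() and add_event() are hypothetical stand-ins that mirror the accounting done by perf_swevent_set_period() and perf_swevent_add(). With a fixed sample_period of 1, every event is exactly one overflow, so the rearm arithmetic (which costs a 64-bit division in the kernel) can be skipped.

/*
 * Illustrative userspace model of the fast path this patch adds; not
 * kernel code. struct hw_model, set_period() and add_event() are
 * hypothetical stand-ins mirroring perf_swevent_set_period() and the
 * patched perf_swevent_add().
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct hw_model {
        int64_t  period_left;   /* negative between samples, counts up */
        uint64_t sample_period;
};

/*
 * Mirrors perf_swevent_set_period(): compute how many full sample
 * periods have elapsed and rearm period_left. The division here is
 * the expense the patch avoids in the period==1 case.
 */
static uint64_t set_period(struct hw_model *hwc)
{
        int64_t val = hwc->period_left;
        uint64_t periods;

        if (val < 0)
                return 0;       /* next sample not due yet */

        periods = (hwc->sample_period + val) / hwc->sample_period;
        hwc->period_left = val - periods * hwc->sample_period;
        return periods;         /* number of overflows to report */
}

/*
 * Mirrors the patched perf_swevent_add() accounting. Returns the
 * number of overflows (samples) this increment of nr produced.
 */
static uint64_t add_event(struct hw_model *hwc, uint64_t nr, bool freq)
{
        /*
         * Fast path added by the patch: with a fixed period of one,
         * a single event is always exactly one overflow, so the
         * division and rearm in set_period() are pure overhead.
         */
        if (nr == 1 && hwc->sample_period == 1 && !freq)
                return 1;

        hwc->period_left += nr;
        if (hwc->period_left < 0)
                return 0;       /* still counting toward the next sample */

        return set_period(hwc);
}

int main(void)
{
        struct hw_model hwc = { .period_left = -1, .sample_period = 1 };

        /* Period of one: the fast path reports one overflow per event. */
        printf("overflows: %llu\n",
               (unsigned long long)add_event(&hwc, 1, false));
        return 0;
}

In the model, the period-of-one case returns before set_period() is ever reached, which is exactly the shortcut the kernel change takes.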
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  17
1 file changed, 11 insertions, 6 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 1dfb6cc4fdea..8e55b440e28a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3759,16 +3759,16 @@ again:
 	return nr;
 }
 
-static void perf_swevent_overflow(struct perf_event *event,
+static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 				    int nmi, struct perf_sample_data *data,
 				    struct pt_regs *regs)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int throttle = 0;
-	u64 overflow;
 
 	data->period = event->hw.last_period;
-	overflow = perf_swevent_set_period(event);
+	if (!overflow)
+		overflow = perf_swevent_set_period(event);
 
 	if (hwc->interrupts == MAX_INTERRUPTS)
 		return;
@@ -3801,14 +3801,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 
 	atomic64_add(nr, &event->count);
 
+	if (!regs)
+		return;
+
 	if (!hwc->sample_period)
 		return;
 
-	if (!regs)
+	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
+		return perf_swevent_overflow(event, 1, nmi, data, regs);
+
+	if (atomic64_add_negative(nr, &hwc->period_left))
 		return;
 
-	if (!atomic64_add_negative(nr, &hwc->period_left))
-		perf_swevent_overflow(event, nmi, data, regs);
+	perf_swevent_overflow(event, 0, nmi, data, regs);
 }
 
 static int perf_swevent_is_counting(struct perf_event *event)
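
For context, a userspace configuration that exercises the new fast path might look like the sketch below, assuming the perf_event_open() interface of this era; the choice of software event is illustrative, and ring-buffer setup for reading the samples is omitted.

/*
 * Sketch of a configuration that hits the fast path: a software event
 * sampled on every occurrence (attr.sample_period == 1, attr.freq == 0).
 * Illustrative only; sample consumption via mmap is omitted.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
        attr.sample_period = 1;         /* sample every single event */
        attr.freq = 0;                  /* fixed period, not frequency mode */

        /* Measure the calling process on any CPU; no glibc wrapper exists. */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* ... mmap a ring buffer and consume samples here ... */
        close(fd);
        return 0;
}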