author    Linus Torvalds <torvalds@linux-foundation.org>    2009-11-02 12:46:06 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-11-02 12:46:06 -0500
commit    bce8fc4cb796dc77ea71699ef88802879a177474 (patch)
tree      683ff9f2def12d390af0ae2dc2326b4d9f85a1cd /kernel
parent    a5e3013d6612d2ed4aefdcd7920ae01df3b63b3a (diff)
parent    ec29b8d2af01912bb79adda8aeab4293539f29ac (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf tools: Remove -Wcast-align
  perf tools: Fix compatibility with libelf 0.8 and autodetect
  perf events: Don't generate events for the idle task when exclude_idle is set
  perf events: Fix swevent hrtimer sampling by keeping track of remaining time when enabling/disabling swevent hrtimers
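Of these, the swevent hrtimer fix is the subtle one: software events such as cpu-clock and task-clock are sampled by a self-restarting hrtimer, and the old enable path always armed it with a full period. An event that is disabled and re-enabled more often than its period elapses (per-task events are toggled on every context switch) would therefore never accumulate enough time to fire. The patch below saves the unexpired remainder on disable and re-arms with it on enable. As a standalone illustration of that save/restore pattern, here is a userspace sketch; the names and the deadline bookkeeping are ours, only the 10 us floor and the remaining/period logic mirror the kernel code:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MIN_PERIOD_NS 10000LL	/* the same 10 us floor the kernel applies */

struct sampler {
	int64_t period_ns;	/* configured sampling period */
	int64_t deadline_ns;	/* absolute expiry time; 0 = timer off */
	int64_t remaining_ns;	/* remainder saved at disable; 0 = none */
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void sampler_enable(struct sampler *s)
{
	int64_t period = s->period_ns;

	if (s->remaining_ns) {
		/* Resume from where the last disable left off. */
		period = s->remaining_ns < 0 ? MIN_PERIOD_NS : s->remaining_ns;
		s->remaining_ns = 0;
	} else if (period < MIN_PERIOD_NS) {
		period = MIN_PERIOD_NS;
	}
	s->deadline_ns = now_ns() + period;	/* "arm" the one-shot timer */
}

static void sampler_disable(struct sampler *s)
{
	/* Userspace stand-in for hrtimer_get_remaining(); the result can
	 * be <= 0 if the deadline has already passed. */
	s->remaining_ns = s->deadline_ns - now_ns();
	s->deadline_ns = 0;			/* "cancel" the timer */
}

int main(void)
{
	struct sampler s = { .period_ns = 1000000 };	/* 1 ms */

	sampler_enable(&s);
	sampler_disable(&s);	/* toggled long before the period elapsed */
	printf("saved remainder: %lld ns\n", (long long)s.remaining_ns);
	sampler_enable(&s);	/* re-armed with the remainder, not 1 ms */
	return 0;
}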
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/perf_event.c    66
1 file changed, 44 insertions(+), 22 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index afb7ef3dbc44..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	regs = task_pt_regs(current);
 
 	if (regs) {
-		if (perf_event_overflow(event, 0, &data, regs))
-			ret = HRTIMER_NORESTART;
+		if (!(event->attr.exclude_idle && current->pid == 0))
+			if (perf_event_overflow(event, 0, &data, regs))
+				ret = HRTIMER_NORESTART;
 	}
 
 	period = max_t(u64, 10000, event->hw.sample_period);
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+	if (hwc->sample_period) {
+		u64 period;
+
+		if (hwc->remaining) {
+			if (hwc->remaining < 0)
+				period = 10000;
+			else
+				period = hwc->remaining;
+			hwc->remaining = 0;
+		} else {
+			period = max_t(u64, 10000, hwc->sample_period);
+		}
+		__hrtimer_start_range_ns(&hwc->hrtimer,
+				ns_to_ktime(period), 0,
+				HRTIMER_MODE_REL, 0);
+	}
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->sample_period) {
+		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+		hwc->remaining = ktime_to_ns(remaining);
+
+		hrtimer_cancel(&hwc->hrtimer);
+	}
+}
+
 /*
  * Software event: cpu wall time clock
  */
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	cpu_clock_perf_event_update(event);
 }
 
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 	now = event->ctx->time;
 
 	atomic64_set(&hwc->prev_count, now);
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	task_clock_perf_event_update(event, event->ctx->time);
 
 }
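For completeness, this is the consumer-side view of the code paths the two perf-events fixes touch. A minimal sketch, assuming a Linux system where the perf_event_open syscall is available (glibc ships no wrapper, so the raw-syscall helper below is our own; CPU-wide counters may require elevated privileges):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

/* glibc provides no perf_event_open() wrapper; this helper is our own. */
static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_CPU_CLOCK;	/* hrtimer-driven swevent */
	attr.sample_period = 1000000;		/* sample every ~1 ms */
	attr.exclude_idle = 1;	/* the idle-task fix makes the swevent
				 * path honor this bit */

	/* CPU-wide event on CPU 0: without exclude_idle this would also
	 * sample the idle task on that CPU. */
	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Disable/enable exercises the remaining-time save/restore path. */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* ... mmap the fd and read samples from the ring buffer ... */
	close(fd);
	return 0;
}

Toggling the event faster than sample_period elapses is exactly the case the remaining-time fix repairs: before it, each PERF_EVENT_IOC_ENABLE re-armed the hrtimer with a full period, so such an event never delivered a sample.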