diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-11-04 05:54:15 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-11-04 05:59:45 -0500 |
commit | a2e71271535fde493c32803b1f34789f97efcb5e (patch) | |
tree | 90d7139bea2f49e947f27af92614fa6eca50b64d /kernel/perf_event.c | |
parent | 6d7aa9d721c8c640066142fd9534afcdf68d7f9d (diff) | |
parent | b419148e567728f6af0c3b01965c1cc141e3e13a (diff) |
Merge commit 'v2.6.32-rc6' into perf/core
Conflicts:
tools/perf/Makefile
Merge reason: Resolve the conflict, merge to upstream and merge in
perf fixes so we can add a dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- | kernel/perf_event.c | 66 |
1 file changed, 44 insertions, 22 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9ecaa45ab6b2..a69d4ed6a666 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -3976,8 +3976,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
3976 | regs = task_pt_regs(current); | 3976 | regs = task_pt_regs(current); |
3977 | 3977 | ||
3978 | if (regs) { | 3978 | if (regs) { |
3979 | if (perf_event_overflow(event, 0, &data, regs)) | 3979 | if (!(event->attr.exclude_idle && current->pid == 0)) |
3980 | ret = HRTIMER_NORESTART; | 3980 | if (perf_event_overflow(event, 0, &data, regs)) |
3981 | ret = HRTIMER_NORESTART; | ||
3981 | } | 3982 | } |
3982 | 3983 | ||
3983 | period = max_t(u64, 10000, event->hw.sample_period); | 3984 | period = max_t(u64, 10000, event->hw.sample_period); |
@@ -3986,6 +3987,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
3986 | return ret; | 3987 | return ret; |
3987 | } | 3988 | } |
3988 | 3989 | ||
3990 | static void perf_swevent_start_hrtimer(struct perf_event *event) | ||
3991 | { | ||
3992 | struct hw_perf_event *hwc = &event->hw; | ||
3993 | |||
3994 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3995 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
3996 | if (hwc->sample_period) { | ||
3997 | u64 period; | ||
3998 | |||
3999 | if (hwc->remaining) { | ||
4000 | if (hwc->remaining < 0) | ||
4001 | period = 10000; | ||
4002 | else | ||
4003 | period = hwc->remaining; | ||
4004 | hwc->remaining = 0; | ||
4005 | } else { | ||
4006 | period = max_t(u64, 10000, hwc->sample_period); | ||
4007 | } | ||
4008 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
4009 | ns_to_ktime(period), 0, | ||
4010 | HRTIMER_MODE_REL, 0); | ||
4011 | } | ||
4012 | } | ||
4013 | |||
4014 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | ||
4015 | { | ||
4016 | struct hw_perf_event *hwc = &event->hw; | ||
4017 | |||
4018 | if (hwc->sample_period) { | ||
4019 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | ||
4020 | hwc->remaining = ktime_to_ns(remaining); | ||
4021 | |||
4022 | hrtimer_cancel(&hwc->hrtimer); | ||
4023 | } | ||
4024 | } | ||
4025 | |||
3989 | /* | 4026 | /* |
3990 | * Software event: cpu wall time clock | 4027 | * Software event: cpu wall time clock |
3991 | */ | 4028 | */ |
@@ -4008,22 +4045,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event) | |||
4008 | int cpu = raw_smp_processor_id(); | 4045 | int cpu = raw_smp_processor_id(); |
4009 | 4046 | ||
4010 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 4047 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); |
4011 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 4048 | perf_swevent_start_hrtimer(event); |
4012 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
4013 | if (hwc->sample_period) { | ||
4014 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
4015 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
4016 | ns_to_ktime(period), 0, | ||
4017 | HRTIMER_MODE_REL, 0); | ||
4018 | } | ||
4019 | 4049 | ||
4020 | return 0; | 4050 | return 0; |
4021 | } | 4051 | } |
4022 | 4052 | ||
4023 | static void cpu_clock_perf_event_disable(struct perf_event *event) | 4053 | static void cpu_clock_perf_event_disable(struct perf_event *event) |
4024 | { | 4054 | { |
4025 | if (event->hw.sample_period) | 4055 | perf_swevent_cancel_hrtimer(event); |
4026 | hrtimer_cancel(&event->hw.hrtimer); | ||
4027 | cpu_clock_perf_event_update(event); | 4056 | cpu_clock_perf_event_update(event); |
4028 | } | 4057 | } |
4029 | 4058 | ||
@@ -4060,22 +4089,15 @@ static int task_clock_perf_event_enable(struct perf_event *event) | |||
4060 | now = event->ctx->time; | 4089 | now = event->ctx->time; |
4061 | 4090 | ||
4062 | atomic64_set(&hwc->prev_count, now); | 4091 | atomic64_set(&hwc->prev_count, now); |
4063 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 4092 | |
4064 | hwc->hrtimer.function = perf_swevent_hrtimer; | 4093 | perf_swevent_start_hrtimer(event); |
4065 | if (hwc->sample_period) { | ||
4066 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
4067 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
4068 | ns_to_ktime(period), 0, | ||
4069 | HRTIMER_MODE_REL, 0); | ||
4070 | } | ||
4071 | 4094 | ||
4072 | return 0; | 4095 | return 0; |
4073 | } | 4096 | } |
4074 | 4097 | ||
4075 | static void task_clock_perf_event_disable(struct perf_event *event) | 4098 | static void task_clock_perf_event_disable(struct perf_event *event) |
4076 | { | 4099 | { |
4077 | if (event->hw.sample_period) | 4100 | perf_swevent_cancel_hrtimer(event); |
4078 | hrtimer_cancel(&event->hw.hrtimer); | ||
4079 | task_clock_perf_event_update(event, event->ctx->time); | 4101 | task_clock_perf_event_update(event, event->ctx->time); |
4080 | 4102 | ||
4081 | } | 4103 | } |