aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events
diff options
context:
space:
mode:
authorAndrew Vagin <avagin@openvz.org>2011-11-07 07:54:12 -0500
committerIngo Molnar <mingo@elte.hu>2011-11-14 07:31:28 -0500
commit5d81e5cfb37a174e8ddc0413e2e70cdf05807ace (patch)
tree3190ed611a1b88092d4a0aee584b505999a26f17 /kernel/events
parent9251f904f95175b4a1d8cbc0449e748f9edd7629 (diff)
events: Don't divide events if it has field period
This patch solves the following problem: currently, some samples may be lost due to throttling. The number of samples is restricted by sysctl_perf_event_sample_rate/HZ. A trace event is divided into some samples according to the event's period. I'm not sure that we should generate more than one sample for each trace event. I think the better way is to use SAMPLE_PERIOD. E.g.: I want to trace when a process sleeps. I created a process which sleeps for 1ms and for 4ms. perf got 100 events in both cases. swapper 0 [000] 1141.371830: sched_stat_sleep: comm=foo pid=1801 delay=1386750 [ns] swapper 0 [000] 1141.369444: sched_stat_sleep: comm=foo pid=1801 delay=4499585 [ns] In the first case the kernel wants to send 4499585 events and in the second case it wants to send 1386750 events. perf report shows that the process sleeps an equal time in both places. That's a bug. With this patch, the kernel generates one event for each "sleep" and the time slice is saved in the "period" field. Perf knows how to handle it. Signed-off-by: Andrew Vagin <avagin@openvz.org> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/1320670457-2633428-3-git-send-email-avagin@openvz.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/core.c7
1 files changed, 6 insertions, 1 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eadac69265fc..8d9dea56c262 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4528,7 +4528,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4528 struct hw_perf_event *hwc = &event->hw; 4528 struct hw_perf_event *hwc = &event->hw;
4529 int throttle = 0; 4529 int throttle = 0;
4530 4530
4531 data->period = event->hw.last_period;
4532 if (!overflow) 4531 if (!overflow)
4533 overflow = perf_swevent_set_period(event); 4532 overflow = perf_swevent_set_period(event);
4534 4533
@@ -4562,6 +4561,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
4562 if (!is_sampling_event(event)) 4561 if (!is_sampling_event(event))
4563 return; 4562 return;
4564 4563
4564 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
4565 data->period = nr;
4566 return perf_swevent_overflow(event, 1, data, regs);
4567 } else
4568 data->period = event->hw.last_period;
4569
4565 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 4570 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4566 return perf_swevent_overflow(event, 1, data, regs); 4571 return perf_swevent_overflow(event, 1, data, regs);
4567 4572