author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-20 06:21:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-20 06:43:33 -0400
commit	26b119bc811a73bac6ecf95bdf284bf31c7955f0 (patch)
tree	cc8b39284012c27c4014c253655cff7ee95f79ce
parent	d7b629a34fc4134a43c730b5f0197855dc4948d0 (diff)
perf_counter: Log irq_period changes
For the dynamic irq_period code, log whenever we change the period so
that analyzing code can normalize the event flow.

[ Impact: add new feature to allow more precise profiling ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.298769743@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/perf_counter.h	8
-rw-r--r--	kernel/perf_counter.c	40
2 files changed, 47 insertions(+), 1 deletion(-)
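A consumer of the event stream has to cope with the period changing
mid-stream. As a rough consumer-side sketch (not part of this patch,
and simplified relative to a real mmap ring-buffer reader: the helper
normalized_total() and its treatment of every non-PERIOD record as one
overflow sample are illustrative assumptions), the code below walks a
flat buffer of records, tracks the latest PERF_EVENT_PERIOD value, and
weights each sample by the period in effect when it was taken. The
record layout mirrors the freq_event structure that perf_log_period()
emits in the patch below.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* mirrors the kernel ABI header: u32 type, u16 misc, u16 size */
struct perf_event_header {
	uint32_t	type;
	uint16_t	misc;
	uint16_t	size;
};

#define PERF_EVENT_PERIOD	4

/* layout of the record written by perf_log_period() */
struct period_event {
	struct perf_event_header	header;
	uint64_t			time;
	uint64_t			period;
};

/*
 * Walk [buf, buf + len) and return a period-weighted event total:
 * every non-PERIOD record is treated as one overflow sample standing
 * for `period` hardware events. A real reader would also check
 * header.misc for the overflow bit; that is elided here.
 */
static uint64_t normalized_total(const unsigned char *buf, size_t len)
{
	uint64_t period = 1;	/* default until the first PERIOD record */
	uint64_t total = 0;
	size_t off = 0;

	while (off + sizeof(struct perf_event_header) <= len) {
		struct perf_event_header hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.size < sizeof(hdr) || off + hdr.size > len)
			break;	/* truncated or corrupt record */

		if (hdr.type == PERF_EVENT_PERIOD &&
		    hdr.size >= sizeof(struct period_event)) {
			struct period_event pe;

			memcpy(&pe, buf + off, sizeof(pe));
			period = pe.period;	/* period was re-tuned */
		} else {
			total += period;	/* one sample = `period` events */
		}
		off += hdr.size;
	}
	return total;
}

int main(void)
{
	unsigned char buf[128];
	size_t off = 0;

	/* one sample at the default period ... */
	struct perf_event_header sample = {
		.type = 0, .misc = 0, .size = sizeof(sample),
	};
	memcpy(buf + off, &sample, sizeof(sample));
	off += sample.size;

	/* ... then the period is re-tuned to 1000 ... */
	struct period_event pe = {
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
			.size = sizeof(pe),
		},
		.time = 0,
		.period = 1000,
	};
	memcpy(buf + off, &pe, sizeof(pe));
	off += pe.header.size;

	/* ... and one more sample lands at the new period */
	memcpy(buf + off, &sample, sizeof(sample));
	off += sample.size;

	/* prints 1001: 1 event at period 1, 1000 at period 1000 */
	printf("normalized total: %llu\n",
	       (unsigned long long)normalized_total(buf, off));
	return 0;
}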
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index c8c1dfc22c93..f612941ef46e 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -258,6 +258,14 @@ enum perf_event_type {
 	PERF_EVENT_COMM			= 3,
 
 	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				irq_period;
+	 * };
+	 */
+	PERF_EVENT_PERIOD		= 4,
+
+	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
 	 * will be PERF_RECORD_*
 	 *
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 64113e6d1942..db02eb16c777 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1046,7 +1046,9 @@ int perf_counter_task_enable(void)
 	return 0;
 }
 
-void perf_adjust_freq(struct perf_counter_context *ctx)
+static void perf_log_period(struct perf_counter *counter, u64 period);
+
+static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
 	u64 irq_period;
@@ -1072,6 +1074,8 @@ void perf_adjust_freq(struct perf_counter_context *ctx)
 		if (!irq_period)
 			irq_period = 1;
 
+		perf_log_period(counter, irq_period);
+
 		counter->hw.irq_period = irq_period;
 		counter->hw.interrupts = 0;
 	}
@@ -2407,6 +2411,40 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
+ *
+ */
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
+{
+	struct perf_output_handle handle;
+	int ret;
+
+	struct {
+		struct perf_event_header	header;
+		u64				time;
+		u64				period;
+	} freq_event = {
+		.header = {
+			.type = PERF_EVENT_PERIOD,
+			.misc = 0,
+			.size = sizeof(freq_event),
+		},
+		.time = sched_clock(),
+		.period = period,
+	};
+
+	if (counter->hw.irq_period == period)
+		return;
+
+	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, freq_event);
+	perf_output_end(&handle);
+}
+
+/*
  * Generic counter overflow handling.
  */
 