author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-02 09:13:03 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-06-02 15:45:30 -0400
commit     b23f3325ed465f1bd914384884269af0d106778c
tree       7b263c707e50463f1e1defc60d371b09e352a21a   /kernel/perf_counter.c
parent     8e5799b1ad2a0567fdfaaf0e91b40efee010f2c1
perf_counter: Rename various fields
A few renames:
s/irq_period/sample_period/
s/irq_freq/sample_freq/
s/PERF_RECORD_/PERF_SAMPLE_/
s/record_type/sample_type/
And change both the new sample_type and read_format to u64.
Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
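
Editor's note: a minimal sketch of what the rename means for code that fills in struct perf_counter_hw_event. The header name and exact struct layout are assumed from the perf_counter API of this era, and setup_sampling() is a hypothetical helper, not kernel code:

#include <linux/perf_counter.h>	/* assumed era uapi header */

/* Hypothetical helper, illustrative only. */
static void setup_sampling(struct perf_counter_hw_event *hw_event)
{
	/* was: hw_event->record_type = PERF_RECORD_IP | PERF_RECORD_TID; */
	hw_event->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;	/* now a u64 bitmask */

	/* was: hw_event->irq_period = 100000; */
	hw_event->sample_period = 100000;	/* events between samples */
}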
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  104
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 978ecfcc7aaf..5ecd9981c035 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1186,7 +1186,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period);
 static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
-	u64 interrupts, irq_period;
+	u64 interrupts, sample_period;
 	u64 events, period;
 	s64 delta;
 
@@ -1204,23 +1204,23 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
 			interrupts = 2*sysctl_perf_counter_limit/HZ;
 		}
 
-		if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
+		if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
 			continue;
 
-		events = HZ * interrupts * counter->hw.irq_period;
-		period = div64_u64(events, counter->hw_event.irq_freq);
+		events = HZ * interrupts * counter->hw.sample_period;
+		period = div64_u64(events, counter->hw_event.sample_freq);
 
-		delta = (s64)(1 + period - counter->hw.irq_period);
+		delta = (s64)(1 + period - counter->hw.sample_period);
 		delta >>= 1;
 
-		irq_period = counter->hw.irq_period + delta;
+		sample_period = counter->hw.sample_period + delta;
 
-		if (!irq_period)
-			irq_period = 1;
+		if (!sample_period)
+			sample_period = 1;
 
-		perf_log_period(counter, irq_period);
+		perf_log_period(counter, sample_period);
 
-		counter->hw.irq_period = irq_period;
+		counter->hw.sample_period = sample_period;
 	}
 	spin_unlock(&ctx->lock);
 }
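
Editor's note: a standalone, user-space sketch of the adjustment arithmetic above, with made-up numbers (HZ, the starting period, and the interrupt count are assumptions). Each tick, the period is nudged halfway toward the value that would produce sample_freq overflows per second:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t HZ = 1000;		/* assumed tick rate */
	uint64_t sample_freq = 4000;		/* requested samples/sec */
	uint64_t sample_period = 1000000;	/* current events-per-sample */
	uint64_t interrupts = 2;		/* overflows seen this tick */

	uint64_t events = HZ * interrupts * sample_period;	/* projected events/sec */
	uint64_t period = events / sample_freq;			/* div64_u64() in the kernel */
	int64_t delta = (int64_t)(1 + period - sample_period);

	delta >>= 1;			/* move halfway, mirroring the kernel's signed shift */
	sample_period += delta;
	if (!sample_period)
		sample_period = 1;

	/* prints 750000: halfway from 1000000 toward the ideal 500000 */
	printf("new sample_period = %llu\n", (unsigned long long)sample_period);
	return 0;
}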
@@ -2297,7 +2297,7 @@ static void perf_counter_output(struct perf_counter *counter,
 				int nmi, struct pt_regs *regs, u64 addr)
 {
 	int ret;
-	u64 record_type = counter->hw_event.record_type;
+	u64 sample_type = counter->hw_event.sample_type;
 	struct perf_output_handle handle;
 	struct perf_event_header header;
 	u64 ip;
@@ -2321,61 +2321,61 @@ static void perf_counter_output(struct perf_counter *counter,
 	header.misc = PERF_EVENT_MISC_OVERFLOW;
 	header.misc |= perf_misc_flags(regs);
 
-	if (record_type & PERF_RECORD_IP) {
+	if (sample_type & PERF_SAMPLE_IP) {
 		ip = perf_instruction_pointer(regs);
-		header.type |= PERF_RECORD_IP;
+		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
 
-	if (record_type & PERF_RECORD_TID) {
+	if (sample_type & PERF_SAMPLE_TID) {
 		/* namespace issues */
 		tid_entry.pid = perf_counter_pid(counter, current);
 		tid_entry.tid = perf_counter_tid(counter, current);
 
-		header.type |= PERF_RECORD_TID;
+		header.type |= PERF_SAMPLE_TID;
 		header.size += sizeof(tid_entry);
 	}
 
-	if (record_type & PERF_RECORD_TIME) {
+	if (sample_type & PERF_SAMPLE_TIME) {
 		/*
 		 * Maybe do better on x86 and provide cpu_clock_nmi()
 		 */
 		time = sched_clock();
 
-		header.type |= PERF_RECORD_TIME;
+		header.type |= PERF_SAMPLE_TIME;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_ADDR) {
-		header.type |= PERF_RECORD_ADDR;
+	if (sample_type & PERF_SAMPLE_ADDR) {
+		header.type |= PERF_SAMPLE_ADDR;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_CONFIG) {
-		header.type |= PERF_RECORD_CONFIG;
+	if (sample_type & PERF_SAMPLE_CONFIG) {
+		header.type |= PERF_SAMPLE_CONFIG;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_CPU) {
-		header.type |= PERF_RECORD_CPU;
+	if (sample_type & PERF_SAMPLE_CPU) {
+		header.type |= PERF_SAMPLE_CPU;
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
-	if (record_type & PERF_RECORD_GROUP) {
-		header.type |= PERF_RECORD_GROUP;
+	if (sample_type & PERF_SAMPLE_GROUP) {
+		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
 			counter->nr_siblings * sizeof(group_entry);
 	}
 
-	if (record_type & PERF_RECORD_CALLCHAIN) {
+	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(regs);
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
 
-			header.type |= PERF_RECORD_CALLCHAIN;
+			header.type |= PERF_SAMPLE_CALLCHAIN;
 			header.size += callchain_size;
 		}
 	}
@@ -2386,28 +2386,28 @@ static void perf_counter_output(struct perf_counter *counter,
 
 	perf_output_put(&handle, header);
 
-	if (record_type & PERF_RECORD_IP)
+	if (sample_type & PERF_SAMPLE_IP)
 		perf_output_put(&handle, ip);
 
-	if (record_type & PERF_RECORD_TID)
+	if (sample_type & PERF_SAMPLE_TID)
 		perf_output_put(&handle, tid_entry);
 
-	if (record_type & PERF_RECORD_TIME)
+	if (sample_type & PERF_SAMPLE_TIME)
 		perf_output_put(&handle, time);
 
-	if (record_type & PERF_RECORD_ADDR)
+	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, addr);
 
-	if (record_type & PERF_RECORD_CONFIG)
+	if (sample_type & PERF_SAMPLE_CONFIG)
 		perf_output_put(&handle, counter->hw_event.config);
 
-	if (record_type & PERF_RECORD_CPU)
+	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
 
 	/*
-	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
 	 */
-	if (record_type & PERF_RECORD_GROUP) {
+	if (sample_type & PERF_SAMPLE_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
 
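
Editor's note: the two passes above fix the sample-record layout — the first sizes the record, the second emits the fields in the same if-chain order. A sketch of what a reader of the mmap'ed buffer would expect for a given sample_type; the uapi header name and the byte sizes of the tid, cpu and group entries are assumptions from this era's ABI, not guaranteed by this patch:

#include <stddef.h>
#include <stdint.h>
#include <linux/perf_counter.h>		/* assumed era uapi header */

static size_t expected_sample_size(uint64_t sample_type, uint64_t nr_siblings,
				   uint64_t callchain_nr)
{
	size_t size = sizeof(struct perf_event_header);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(uint64_t);		/* ip */
	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(uint64_t);		/* u32 pid + u32 tid (assumed) */
	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(uint64_t);		/* time */
	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(uint64_t);		/* addr */
	if (sample_type & PERF_SAMPLE_CONFIG)
		size += sizeof(uint64_t);		/* config */
	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(uint64_t);		/* u32 cpu + padding (assumed) */
	if (sample_type & PERF_SAMPLE_GROUP)		/* nr + one pair per sibling (assumed 16B) */
		size += sizeof(uint64_t) + nr_siblings * 2 * sizeof(uint64_t);
	if (sample_type & PERF_SAMPLE_CALLCHAIN)	/* nr + that many ips, if one was captured */
		size += (1 + callchain_nr) * sizeof(uint64_t);

	return size;
}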
@@ -2702,7 +2702,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
- * Log irq_period changes so that analyzing tools can re-normalize the
+ * Log sample_period changes so that analyzing tools can re-normalize the
  * event flow.
  */
 
@@ -2725,7 +2725,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 		.period = period,
 	};
 
-	if (counter->hw.irq_period == period)
+	if (counter->hw.sample_period == period)
 		return;
 
 	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
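
Editor's note: the point of logging period changes is that a tool can weight each sample by the period in effect when it was taken. A hypothetical, illustrative helper on the tool side (not kernel code):

#include <stddef.h>
#include <stdint.h>

/* Each sample stands for 'period' events, so the event total is the sum of
 * the per-sample periods rather than the raw sample count. */
static uint64_t events_represented(const uint64_t *sample_periods, size_t nr_samples)
{
	uint64_t total = 0;

	for (size_t i = 0; i < nr_samples; i++)
		total += sample_periods[i];
	return total;
}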
@@ -2834,7 +2834,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->irq_period;
+	s64 period = hwc->sample_period;
 
 	if (unlikely(left <= -period)) {
 		left = period;
@@ -2874,7 +2874,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 		ret = HRTIMER_NORESTART;
 	}
 
-	period = max_t(u64, 10000, counter->hw.irq_period);
+	period = max_t(u64, 10000, counter->hw.sample_period);
 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
 
 	return ret;
@@ -2959,7 +2959,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.irq_period && !neg && regs)
+	if (counter->hw.sample_period && !neg && regs)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
 
@@ -3080,8 +3080,8 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
-	if (hwc->irq_period) {
-		u64 period = max_t(u64, 10000, hwc->irq_period);
+	if (hwc->sample_period) {
+		u64 period = max_t(u64, 10000, hwc->sample_period);
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL, 0);
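
Editor's note: here, in the task-clock variant further down, and in the hrtimer callback above, the period is clamped so an hrtimer-driven software counter never fires more often than every 10 microseconds (the period is fed to ns_to_ktime(), so it is treated as nanoseconds). A trivial illustrative sketch of that clamp, not kernel code:

#include <stdint.h>

/* Mirrors max_t(u64, 10000, hwc->sample_period). */
static uint64_t effective_timer_period_ns(uint64_t sample_period)
{
	return sample_period > 10000 ? sample_period : 10000;
}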
@@ -3092,7 +3092,7 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	if (counter->hw.irq_period)
+	if (counter->hw.sample_period)
 		hrtimer_cancel(&counter->hw.hrtimer);
 	cpu_clock_perf_counter_update(counter);
 }
@@ -3132,8 +3132,8 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 	atomic64_set(&hwc->prev_count, now);
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
-	if (hwc->irq_period) {
-		u64 period = max_t(u64, 10000, hwc->irq_period);
+	if (hwc->sample_period) {
+		u64 period = max_t(u64, 10000, hwc->sample_period);
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL, 0);
@@ -3144,7 +3144,7 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	if (counter->hw.irq_period)
+	if (counter->hw.sample_period)
 		hrtimer_cancel(&counter->hw.hrtimer);
 	task_clock_perf_counter_update(counter, counter->ctx->time);
 
@@ -3223,7 +3223,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
-	counter->hw.irq_period = counter->hw_event.irq_period;
+	counter->hw.sample_period = counter->hw_event.sample_period;
 
 	return &perf_ops_generic;
 }
@@ -3323,15 +3323,15 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		pmu = NULL;
 
 	hwc = &counter->hw;
-	if (hw_event->freq && hw_event->irq_freq)
-		hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
+	if (hw_event->freq && hw_event->sample_freq)
+		hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
 	else
-		hwc->irq_period = hw_event->irq_period;
+		hwc->sample_period = hw_event->sample_period;
 
 	/*
-	 * we currently do not support PERF_RECORD_GROUP on inherited counters
+	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
 	 */
-	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+	if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
 		goto done;
 
 	if (perf_event_raw(hw_event)) {
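
Editor's note: a worked example of the frequency path above, with HZ and the requested rate as assumptions. The counter starts from a seed period derived from TICK_NSEC, and perf_adjust_freq() then re-tunes it every tick toward the requested sample_freq:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t TICK_NSEC = 1000000;	/* assumed: ~NSEC_PER_SEC / HZ with HZ=1000 */
	uint64_t sample_freq = 4000;		/* user asked for 4000 samples/sec */

	/* div64_u64(TICK_NSEC, sample_freq) in the kernel */
	uint64_t sample_period = TICK_NSEC / sample_freq;

	/* prints 250; this is only a starting point, not the steady-state period */
	printf("initial sample_period = %llu\n", (unsigned long long)sample_period);
	return 0;
}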