path: root/kernel/perf_counter.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-22 03:29:32 -0400
committer  Ingo Molnar <mingo@elte.hu>	2009-08-09 06:54:30 -0400
commit     7b4b6658e152ed4568cfff48175d93645df081d1 (patch)
tree       663ff8dc6b7b11fcfbfaf6e24e146e2e29b3489c /kernel/perf_counter.c
parent     46ab976443c6c566c8fe6fc72a6733a55ba9fbea (diff)

perf_counter: Fix software counters for fast moving event sources

Reimplement the software counters to deal with fast moving event sources
(such as tracepoints). This means being able to generate multiple
overflows from a single 'event' as well as support throttling.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 164 +++++++++++++++++++++++++++------------------
 1 file changed, 94 insertions(+), 70 deletions(-)
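To make the new period accounting easier to follow before reading the diff, here is a minimal, standalone sketch of the idea in plain C. It is only an illustration under simplified assumptions: the names (sw_counter, sw_set_period, sw_add) are hypothetical, and the kernel's atomics (atomic64_add_negative, atomic64_cmpxchg) are replaced by ordinary arithmetic, so this is not the kernel implementation itself.

	#include <stdint.h>
	#include <stdio.h>

	struct sw_counter {
		uint64_t count;          /* total number of events counted   */
		int64_t  period_left;    /* kept in [-sample_period, 0]      */
		uint64_t sample_period;  /* events per overflow/sample       */
	};

	/* Count how many whole periods elapsed and re-arm period_left. */
	static uint64_t sw_set_period(struct sw_counter *c)
	{
		uint64_t period = c->sample_period;
		int64_t  val    = c->period_left;
		uint64_t nr;

		if (val < 0)                 /* no period boundary crossed yet */
			return 0;

		nr = ((uint64_t)val + period) / period;  /* periods crossed */
		c->period_left = val - (int64_t)(nr * period);
		return nr;
	}

	/* Add 'nr' events at once; a large 'nr' may cross several periods. */
	static void sw_add(struct sw_counter *c, uint64_t nr)
	{
		c->count       += nr;
		c->period_left += (int64_t)nr;

		if (c->period_left >= 0) {   /* sign flip => at least one overflow */
			uint64_t overflows = sw_set_period(c);
			printf("increment of %llu crossed %llu period(s)\n",
			       (unsigned long long)nr,
			       (unsigned long long)overflows);
		}
	}

	int main(void)
	{
		/* sample_period = 100, period_left armed at -100 */
		struct sw_counter c = { 0, -100, 100 };

		sw_add(&c, 250);  /* one burst: crosses periods at 100 and 200 */
		sw_add(&c, 30);   /* leaves period_left at -20: no overflow yet */
		return 0;
	}

Running this prints that the single increment of 250 crosses two sample periods, which is the "multiple overflows from a single 'event'" behaviour the patch adds. The kernel code below does the same arithmetic, but inside an atomic64_cmpxchg retry loop so that concurrent (including NMI-context) updates of period_left stay consistent.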
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 868102172aa4..615440ab9295 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3344,87 +3344,81 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */

-static void perf_swcounter_update(struct perf_counter *counter)
-{
-	struct hw_perf_counter *hwc = &counter->hw;
-	u64 prev, now;
-	s64 delta;
-
-again:
-	prev = atomic64_read(&hwc->prev_count);
-	now = atomic64_read(&hwc->count);
-	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-		goto again;
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &hwc->period_left);
-}
-
-static void perf_swcounter_set_period(struct perf_counter *counter)
-{
-	struct hw_perf_counter *hwc = &counter->hw;
-	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
-
-	if (unlikely(left <= -period)) {
-		left = period;
-		atomic64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-	}
-
-	if (unlikely(left <= 0)) {
-		left += period;
-		atomic64_add(period, &hwc->period_left);
-		hwc->last_period = period;
-	}
-
-	atomic64_set(&hwc->prev_count, -left);
-	atomic64_set(&hwc->count, -left);
-}
-
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
-{
-	enum hrtimer_restart ret = HRTIMER_RESTART;
-	struct perf_sample_data data;
-	struct perf_counter *counter;
-	u64 period;
-
-	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->pmu->read(counter);
-
-	data.addr = 0;
-	data.regs = get_irq_regs();
-	/*
-	 * In case we exclude kernel IPs or are somehow not in interrupt
-	 * context, provide the next best thing, the user IP.
-	 */
-	if ((counter->attr.exclude_kernel || !data.regs) &&
-			!counter->attr.exclude_user)
-		data.regs = task_pt_regs(current);
-
-	if (data.regs) {
-		if (perf_counter_overflow(counter, 0, &data))
-			ret = HRTIMER_NORESTART;
-	}
-
-	period = max_t(u64, 10000, counter->hw.sample_period);
-	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-
-	return ret;
-}
-
-static void perf_swcounter_overflow(struct perf_counter *counter,
-				    int nmi, struct perf_sample_data *data)
-{
-	data->period = counter->hw.last_period;
-
-	perf_swcounter_update(counter);
-	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, data))
-		/* soft-disable the counter */
-		;
-}
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
+	u64 period = hwc->last_period;
+	u64 nr, offset;
+	s64 old, val;
+
+	hwc->last_period = hwc->sample_period;
+
+again:
+	old = val = atomic64_read(&hwc->period_left);
+	if (val < 0)
+		return 0;
+
+	nr = div64_u64(period + val, period);
+	offset = nr * period;
+	val -= offset;
+	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+		goto again;
+
+	return nr;
+}
+
+static void perf_swcounter_overflow(struct perf_counter *counter,
+				    int nmi, struct perf_sample_data *data)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
+	u64 overflow;
+
+	data->period = counter->hw.last_period;
+	overflow = perf_swcounter_set_period(counter);
+
+	if (hwc->interrupts == MAX_INTERRUPTS)
+		return;
+
+	for (; overflow; overflow--) {
+		if (perf_counter_overflow(counter, nmi, data)) {
+			/*
+			 * We inhibit the overflow from happening when
+			 * hwc->interrupts == MAX_INTERRUPTS.
+			 */
+			break;
+		}
+	}
+}
+
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
+{
+	/*
+	 * Nothing to do, we already reset hwc->interrupts.
+	 */
+}
+
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+			       int nmi, struct perf_sample_data *data)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
+
+	atomic64_add(nr, &counter->count);
+
+	if (!hwc->sample_period)
+		return;
+
+	if (!data->regs)
+		return;
+
+	if (!atomic64_add_negative(nr, &hwc->period_left))
+		perf_swcounter_overflow(counter, nmi, data);
+}

 static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3488,15 +3482,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	return 1;
 }

-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-			       int nmi, struct perf_sample_data *data)
-{
-	int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-	if (counter->hw.sample_period && !neg && data->regs)
-		perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
 				     u32 event, u64 nr, int nmi,
@@ -3575,27 +3560,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,

 static void perf_swcounter_read(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }

 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-	perf_swcounter_set_period(counter);
+	struct hw_perf_counter *hwc = &counter->hw;
+
+	if (hwc->sample_period) {
+		hwc->last_period = hwc->sample_period;
+		perf_swcounter_set_period(counter);
+	}
 	return 0;
 }

 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }

 static const struct pmu perf_ops_generic = {
 	.enable		= perf_swcounter_enable,
 	.disable	= perf_swcounter_disable,
 	.read		= perf_swcounter_read,
+	.unthrottle	= perf_swcounter_unthrottle,
 };

 /*
+ * hrtimer based swcounter callback
+ */
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
+	struct perf_counter *counter;
+	u64 period;
+
+	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+	counter->pmu->read(counter);
+
+	data.addr = 0;
+	data.regs = get_irq_regs();
+	/*
+	 * In case we exclude kernel IPs or are somehow not in interrupt
+	 * context, provide the next best thing, the user IP.
+	 */
+	if ((counter->attr.exclude_kernel || !data.regs) &&
+			!counter->attr.exclude_user)
+		data.regs = task_pt_regs(current);
+
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
+			ret = HRTIMER_NORESTART;
+	}
+
+	period = max_t(u64, 10000, counter->hw.sample_period);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+	return ret;
+}
+
+/*
  * Software counter: cpu wall time clock
  */
