diff options
Diffstat (limited to 'arch/alpha/kernel/perf_event.c')
-rw-r--r-- arch/alpha/kernel/perf_event.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index a25fe9eb4739..1cc49683fb69 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -422,9 +422,10 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
422 | static int alpha_pmu_add(struct perf_event *event, int flags) | 422 | static int alpha_pmu_add(struct perf_event *event, int flags) |
423 | { | 423 | { |
424 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 424 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
425 | struct hw_perf_event *hwc = &event->hw; | ||
425 | int n0; | 426 | int n0; |
426 | int ret; | 427 | int ret; |
427 | unsigned long flags; | 428 | unsigned long irq_flags; |
428 | 429 | ||
429 | /* | 430 | /* |
430 | * The Sparc code has the IRQ disable first followed by the perf | 431 | * The Sparc code has the IRQ disable first followed by the perf |
@@ -435,7 +436,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
435 | * final PMI to occur before we disable interrupts. | 436 | * final PMI to occur before we disable interrupts. |
436 | */ | 437 | */ |
437 | perf_pmu_disable(event->pmu); | 438 | perf_pmu_disable(event->pmu); |
438 | local_irq_save(flags); | 439 | local_irq_save(irq_flags); |
439 | 440 | ||
440 | /* Default to error to be returned */ | 441 | /* Default to error to be returned */ |
441 | ret = -EAGAIN; | 442 | ret = -EAGAIN; |
@@ -458,7 +459,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
458 | if (!(flags & PERF_EF_START)) | 459 | if (!(flags & PERF_EF_START)) |
459 | hwc->state |= PERF_HES_STOPPED; | 460 | hwc->state |= PERF_HES_STOPPED; |
460 | 461 | ||
461 | local_irq_restore(flags); | 462 | local_irq_restore(irq_flags); |
462 | perf_pmu_enable(event->pmu); | 463 | perf_pmu_enable(event->pmu); |
463 | 464 | ||
464 | return ret; | 465 | return ret; |
@@ -474,11 +475,11 @@ static void alpha_pmu_del(struct perf_event *event, int flags)
474 | { | 475 | { |
475 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 476 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
476 | struct hw_perf_event *hwc = &event->hw; | 477 | struct hw_perf_event *hwc = &event->hw; |
477 | unsigned long flags; | 478 | unsigned long irq_flags; |
478 | int j; | 479 | int j; |
479 | 480 | ||
480 | perf_pmu_disable(event->pmu); | 481 | perf_pmu_disable(event->pmu); |
481 | local_irq_save(flags); | 482 | local_irq_save(irq_flags); |
482 | 483 | ||
483 | for (j = 0; j < cpuc->n_events; j++) { | 484 | for (j = 0; j < cpuc->n_events; j++) { |
484 | if (event == cpuc->event[j]) { | 485 | if (event == cpuc->event[j]) { |
@@ -504,7 +505,7 @@ static void alpha_pmu_del(struct perf_event *event, int flags)
504 | } | 505 | } |
505 | } | 506 | } |
506 | 507 | ||
507 | local_irq_restore(flags); | 508 | local_irq_restore(irq_flags); |
508 | perf_pmu_enable(event->pmu); | 509 | perf_pmu_enable(event->pmu); |
509 | } | 510 | } |
510 | 511 | ||
@@ -523,7 +524,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
523 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 524 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
524 | 525 | ||
525 | if (!(hwc->state & PERF_HES_STOPPED)) { | 526 | if (!(hwc->state & PERF_HES_STOPPED)) { |
526 | cpuc->idx_mask &= !(1UL<<hwc->idx); | 527 | cpuc->idx_mask &= ~(1UL<<hwc->idx); |
527 | hwc->state |= PERF_HES_STOPPED; | 528 | hwc->state |= PERF_HES_STOPPED; |
528 | } | 529 | } |
529 | 530 | ||
@@ -533,7 +534,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
533 | } | 534 | } |
534 | 535 | ||
535 | if (cpuc->enabled) | 536 | if (cpuc->enabled) |
536 | wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); | 537 | wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx)); |
537 | } | 538 | } |
538 | 539 | ||
539 | 540 | ||
@@ -849,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
849 | /* Interrupts coming too quickly; "throttle" the | 850 | /* Interrupts coming too quickly; "throttle" the |
850 | * counter, i.e., disable it for a little while. | 851 | * counter, i.e., disable it for a little while. |
851 | */ | 852 | */ |
852 | cpuc->idx_mask &= ~(1UL<<idx); | 853 | alpha_pmu_stop(event, 0); |
853 | } | 854 | } |
854 | } | 855 | } |
855 | wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); | 856 | wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); |