Diffstat (limited to 'arch/alpha/kernel/perf_event.c')
-rw-r--r--  arch/alpha/kernel/perf_event.c  137
1 file changed, 92 insertions(+), 45 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f58c83..90561c45e7d8 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
 #include <linux/mutex.h>
+#include <linux/init.h>
 
 #include <asm/hwrpb.h>
 #include <asm/atomic.h>
@@ -307,7 +308,7 @@ again:
 		      new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come. Detect and fix this situation.
@@ -402,14 +403,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +420,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/*
 	 * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +436,8 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	/* Default to error to be returned */
 	ret = -EAGAIN;
@@ -455,8 +456,12 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -467,15 +472,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int j;
 
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (event == cpuc->event[j]) {
@@ -501,8 +506,8 @@ static void alpha_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +519,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +678,36 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable = alpha_pmu_enable,
-	.disable = alpha_pmu_disable,
-	.read = alpha_pmu_read,
-	.unthrottle = alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);
 
-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +733,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +746,17 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable = alpha_pmu_enable,
+	.pmu_disable = alpha_pmu_disable,
+	.event_init = alpha_pmu_event_init,
+	.add = alpha_pmu_add,
+	.del = alpha_pmu_del,
+	.start = alpha_pmu_start,
+	.stop = alpha_pmu_stop,
+	.read = alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +810,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +851,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
-			cpuc->idx_mask &= ~(1UL<<idx);
+			alpha_pmu_stop(event, 0);
 		}
 	}
 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -820,13 +864,13 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 /*
  * Init call to initialise performance events at kernel startup.
  */
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
 	pr_info("Performance events: ");
 
 	if (!supported_cpu()) {
 		pr_cont("No support for your CPU.\n");
-		return;
+		return 0;
 	}
 
 	pr_cont("Supported CPU type!\n");
@@ -837,6 +881,9 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
-}
 
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+	return 0;
+}
+early_initcall(init_hw_perf_events);