Diffstat (limited to 'arch/alpha')

 -rw-r--r--  arch/alpha/Kconfig                  |   1
 -rw-r--r--  arch/alpha/include/asm/perf_event.h |   5
 -rw-r--r--  arch/alpha/kernel/perf_event.c      | 128
 -rw-r--r--  arch/alpha/kernel/time.c            |  30

 4 files changed, 102 insertions(+), 62 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b9647bb66d13..d04ccd73af45 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help
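Note on HAVE_IRQ_WORK: selecting it declares that this architecture can run irq_work callbacks, i.e. deferred functions queued from NMI-like contexts (here, the performance-monitor interrupt) and executed later in ordinary hard-IRQ context; on Alpha the timer tick is what eventually runs them (see the time.c hunks below). A minimal consumer of the interface might look like the following sketch against the 2.6.37-era API; every name other than the irq_work calls is hypothetical:

	#include <linux/irq_work.h>

	static struct irq_work my_work;

	/* Runs later in hard-IRQ context, where normal locking is legal. */
	static void my_irq_work_cb(struct irq_work *work)
	{
		/* ... process whatever the PMI handler recorded ... */
	}

	static int __init my_init(void)
	{
		init_irq_work(&my_work, my_irq_work_cb);
		return 0;
	}

	/* From the PMI handler, where almost nothing else is safe: */
	static void my_pmi_hook(void)
	{
		irq_work_queue(&my_work);
	}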
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
index 4157cd3c44a9..fe792ca818f6 100644
--- a/arch/alpha/include/asm/perf_event.h
+++ b/arch/alpha/include/asm/perf_event.h
@@ -1,11 +1,6 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f58c83..1cc49683fb69 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come.  Detect and fix this situation.
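The arithmetic in the delta line deserves a gloss: Alpha's PMCs are narrower than 64 bits, so the previous raw value is masked to the counter's width before the subtraction, letting unsigned wraparound yield the correct distance even when the counter rolled over between reads; ovf is a caller-supplied correction (a full counter period) for an overflow already known to have happened. A self-contained sketch of the same idea, with illustrative names rather than the kernel's:

	/* Distance a narrow PMC advanced between two reads.  Masking
	 * "prev" to the counter width makes the unsigned subtraction
	 * wrap exactly as the hardware counter does.
	 */
	static long pmc_delta(unsigned long prev, unsigned long new_raw,
			      unsigned long count_mask, long ovf)
	{
		return (new_raw - (prev & count_mask)) + ovf;
	}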
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
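The rewritten loop encodes the new state model: an event may keep its counter assignment while core perf has it marked PERF_HES_STOPPED, in which case its bit must stay out of idx_mask so the PMC is not re-armed. The predicate, pulled out as an illustrative helper (not part of the patch; assumes <linux/perf_event.h>):

	/* Only events not marked stopped get their bit back in idx_mask
	 * after counters are (re)assigned.
	 */
	static int pmc_is_running(const struct hw_perf_event *hwc)
	{
		return !(hwc->state & PERF_HES_STOPPED);
	}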
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/*
 	 * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	/* Default to error to be returned */
 	ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
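The renamed entry points reflect the reworked pmu API: scheduling an event onto a counter (add/del) is now separate from starting and stopping it (start/stop), which is why add() takes a flags argument and leaves the event PERF_HES_STOPPED unless PERF_EF_START is passed. A simplified, caller's-eye sketch of the sequence core perf drives (illustrative only; the core also brackets these calls with pmu_disable/pmu_enable):

	static void lifecycle_sketch(struct pmu *p, struct perf_event *event)
	{
		p->add(event, PERF_EF_START);	 /* claim a PMC and begin counting */
		p->stop(event, PERF_EF_UPDATE);	 /* pause; fold hardware count in  */
		p->start(event, PERF_EF_RELOAD); /* resume; re-arm sample period   */
		p->del(event, 0);		 /* give the PMC back              */
	}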
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int j;
 
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
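The two hw.state bits carry the whole protocol here: PERF_HES_STOPPED means the PMC is not counting for this event, and PERF_HES_UPTODATE means event->count already reflects the hardware. stop(PERF_EF_UPDATE) establishes both; start(PERF_EF_RELOAD) rewrites the counter, which is only safe when nothing would be lost, hence the WARN_ON_ONCE. As an illustrative predicate (not in the patch; assumes <linux/perf_event.h>):

	/* Re-arming the period (PERF_EF_RELOAD) discards the hardware
	 * count, so the event must be stopped with its count synced.
	 */
	static int safe_to_reload(const struct hw_perf_event *hwc)
	{
		return (hwc->state & (PERF_HES_STOPPED | PERF_HES_UPTODATE))
			== (PERF_HES_STOPPED | PERF_HES_UPTODATE);
	}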
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);
 
-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
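Returning -ENOENT for foreign event types is part of the new contract: multiple struct pmu instances can now be registered, and event creation offers the event to each in turn, so -ENOENT means "not mine, try the next PMU" while any other error aborts the search. A simplified sketch of that dispatch loop (illustrative; not the core's exact code):

	#include <linux/err.h>
	#include <linux/perf_event.h>

	static struct pmu *find_pmu_sketch(struct perf_event *event,
					   struct pmu **pmus, int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = pmus[i]->event_init(event);
			if (!err)
				return pmus[i];		/* claimed the event */
			if (err != -ENOENT)
				return ERR_PTR(err);	/* hard failure */
		}
		return ERR_PTR(-ENOENT);
	}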
@@ -700,7 +732,7 @@
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
+	.read		= alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
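This struct replaces the const struct pmu deleted further up, with the old per-event ops split along new lines. A rough correspondence, for reading the rest of the diff (informal summary, not from the patch itself):

	/*
	 * old .enable          -> .add    (schedule onto a PMC; may also start)
	 * old .disable         -> .del    (unschedule from its PMC)
	 * old .unthrottle      -> .start  (now paired with a new .stop)
	 * old hw_perf_enable   -> .pmu_enable   (whole-PMU, not per event)
	 * old hw_perf_disable  -> .pmu_disable
	 * .read                   unchanged
	 */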
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
-			cpuc->idx_mask &= ~(1UL<<idx);
+			alpha_pmu_stop(event, 0);
 		}
 	}
 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
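Throttling now goes through alpha_pmu_stop() rather than clearing the idx_mask bit by hand, so the PERF_HES_* bookkeeping stays consistent, and core perf later unthrottles through ->start(). The flags argument is 0, i.e. no PERF_EF_UPDATE, which appears deliberate: the handler has just called alpha_perf_event_update() for this counter a few lines earlier, so the count is already synced. In fragment form:

	/* In the PMI handler, 'event' already has its count synced: */
	alpha_pmu_stop(event, 0);	/* stop; skip the redundant update */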
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
+
+	perf_pmu_register(&pmu);
 }
 
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 396af1799ea4..0f1d8493cfca 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()  __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()      0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()      0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
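Why a bare per-CPU byte rather than running the queued work immediately: irq_work can be queued from the PMI, where running arbitrary callbacks is not safe, and Alpha (like several architectures of the period) relies on the next timer tick rather than a self-interrupt to deliver the work; the polling site is in timer_interrupt(), below. The delivery path in miniature (the raise-hook wiring into the generic layer is elided here):

	/*
	 * PMI:         set_irq_work_pending();   - just flips the per-CPU byte
	 * timer tick:  if (test_irq_work_pending()) {
	 *                      clear_irq_work_pending();
	 *                      irq_work_run();   - safe here: plain hard-IRQ
	 *              }
	 */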
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifndef CONFIG_SMP