path: root/arch/alpha
Diffstat (limited to 'arch/alpha')
 -rw-r--r--   arch/alpha/Kconfig                    1
 -rw-r--r--   arch/alpha/include/asm/irqflags.h    67
 -rw-r--r--   arch/alpha/include/asm/perf_event.h   5
 -rw-r--r--   arch/alpha/include/asm/system.h      28
 -rw-r--r--   arch/alpha/kernel/perf_event.c      128
 -rw-r--r--   arch/alpha/kernel/signal.c            2
 -rw-r--r--   arch/alpha/kernel/time.c             30
7 files changed, 170 insertions(+), 91 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b9647bb66d1..d04ccd73af4 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help
diff --git a/arch/alpha/include/asm/irqflags.h b/arch/alpha/include/asm/irqflags.h
new file mode 100644
index 00000000000..299bbc7e9d7
--- /dev/null
+++ b/arch/alpha/include/asm/irqflags.h
@@ -0,0 +1,67 @@
+#ifndef __ALPHA_IRQFLAGS_H
+#define __ALPHA_IRQFLAGS_H
+
+#include <asm/system.h>
+
+#define IPL_MIN		0
+#define IPL_SW0		1
+#define IPL_SW1		2
+#define IPL_DEV0	3
+#define IPL_DEV1	4
+#define IPL_TIMER	5
+#define IPL_PERF	6
+#define IPL_POWERFAIL	6
+#define IPL_MCHECK	7
+#define IPL_MAX		7
+
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+#undef IPL_MIN
+#define IPL_MIN		__min_ipl
+extern int __min_ipl;
+#endif
+
+#define getipl()	(rdps() & 7)
+#define setipl(ipl)	((void) swpipl(ipl))
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return rdps();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	setipl(IPL_MAX);
+	barrier();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags = swpipl(IPL_MAX);
+	barrier();
+	return flags;
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	barrier();
+	setipl(IPL_MIN);
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	setipl(flags);
+	barrier();
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+	return flags == IPL_MAX;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(getipl());
+}
+
+#endif /* __ALPHA_IRQFLAGS_H */
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
index 4157cd3c44a..fe792ca818f 100644
--- a/arch/alpha/include/asm/perf_event.h
+++ b/arch/alpha/include/asm/perf_event.h
@@ -1,11 +1,6 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET	0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h
index 5aa40cca4f2..9f78e693463 100644
--- a/arch/alpha/include/asm/system.h
+++ b/arch/alpha/include/asm/system.h
@@ -259,34 +259,6 @@ __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
 __CALL_PAL_W1(wrusp, unsigned long);
 __CALL_PAL_W1(wrvptptr, unsigned long);
 
-#define IPL_MIN		0
-#define IPL_SW0		1
-#define IPL_SW1		2
-#define IPL_DEV0	3
-#define IPL_DEV1	4
-#define IPL_TIMER	5
-#define IPL_PERF	6
-#define IPL_POWERFAIL	6
-#define IPL_MCHECK	7
-#define IPL_MAX		7
-
-#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
-#undef IPL_MIN
-#define IPL_MIN		__min_ipl
-extern int __min_ipl;
-#endif
-
-#define getipl()	(rdps() & 7)
-#define setipl(ipl)	((void) swpipl(ipl))
-
-#define local_irq_disable()	do { setipl(IPL_MAX); barrier(); } while(0)
-#define local_irq_enable()	do { barrier(); setipl(IPL_MIN); } while(0)
-#define local_save_flags(flags)	((flags) = rdps())
-#define local_irq_save(flags)	do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
-#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)
-
-#define irqs_disabled()	(getipl() == IPL_MAX)
-
 /*
  * TB routines..
  */
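
The two hunks above are two halves of the same move: the IPL constants and the raw interrupt-masking macros leave asm/system.h, and the new asm/irqflags.h supplies them as arch_local_*() helpers that the generic include/linux/irqflags.h layer wraps into local_irq_save()/local_irq_restore() and friends. Below is a rough, userspace-compilable sketch of that layering; fake_ipl and the two wrapper macros are simplifications for illustration (no PALcode calls, no irq-tracing hooks), not the kernel's actual definitions.

#include <assert.h>

/* Stand-in for the Alpha processor-status IPL field: 0 = open, 7 = all masked. */
static unsigned long fake_ipl = 0;

#define IPL_MIN 0
#define IPL_MAX 7

/* Models of the arch_local_*() helpers added in asm/irqflags.h above. */
static inline unsigned long arch_local_save_flags(void)
{
	return fake_ipl;			/* rdps() & 7 on real hardware */
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = fake_ipl;		/* swpipl(IPL_MAX) returns the old level */
	fake_ipl = IPL_MAX;
	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	fake_ipl = flags;			/* setipl(flags) */
}

/* Roughly what the generic layer builds on top (tracing hooks omitted). */
#define local_irq_save(flags)		do { (flags) = arch_local_irq_save(); } while (0)
#define local_irq_restore(flags)	do { arch_local_irq_restore(flags); } while (0)

int main(void)
{
	unsigned long flags;

	local_irq_save(flags);			/* raise IPL to 7, remember the old level */
	assert(arch_local_save_flags() == IPL_MAX);
	local_irq_restore(flags);		/* drop back to the saved level */
	assert(arch_local_save_flags() == IPL_MIN);
	return 0;
}
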
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f58c8..1cc49683fb6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come. Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/*
 	 * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	/* Default to error to be returned */
 	ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int j;
 
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);
 
-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
+	.read		= alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
-			cpuc->idx_mask &= ~(1UL<<idx);
+			alpha_pmu_stop(event, 0);
 		}
 	}
 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
+
+	perf_pmu_register(&pmu);
 }
 
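
The perf_event.c conversion above replaces the old enable/disable/unthrottle hooks with the reworked pmu interface: alpha_pmu_add()/del() schedule an event on a counter, alpha_pmu_start()/stop() flip it between counting and stopped, and the software state is tracked with two flags, PERF_HES_STOPPED (not currently counting) and PERF_HES_UPTODATE (the cached count already reflects the hardware). The snippet below is a small userspace model of that bookkeeping, not kernel code: the fake_* functions and the one-field "hardware" counter are stand-ins, and only the flag names and the transition logic mirror the hunks above; the numeric flag values are illustrative.

#include <assert.h>
#include <stdio.h>

#define PERF_HES_STOPPED	0x01	/* counter is not currently counting */
#define PERF_HES_UPTODATE	0x02	/* cached count matches the "hardware" */
#define PERF_EF_START		0x01	/* ->add() should also start the event */
#define PERF_EF_UPDATE		0x04	/* ->stop() should fold in the hw count */

struct fake_event {
	unsigned int state;	/* PERF_HES_* flags, as in hw_perf_event::state */
	long count;		/* what perf would report to userspace */
	long hw;		/* ticks sitting in the "PMC" since the last fold */
};

/* Mirrors the flag handling in alpha_pmu_add(). */
static void fake_add(struct fake_event *e, int flags)
{
	e->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		e->state |= PERF_HES_STOPPED;
}

/* Mirrors alpha_pmu_start(), without the PERF_EF_RELOAD path. */
static void fake_start(struct fake_event *e)
{
	assert(e->state & PERF_HES_STOPPED);
	e->state = 0;				/* counting again, cached value now stale */
}

/* Mirrors the flag handling in alpha_pmu_stop(). */
static void fake_stop(struct fake_event *e, int flags)
{
	e->state |= PERF_HES_STOPPED;
	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE)) {
		e->count += e->hw;		/* alpha_perf_event_update() stand-in */
		e->hw = 0;
		e->state |= PERF_HES_UPTODATE;
	}
}

int main(void)
{
	struct fake_event ev = { 0, 0, 0 };

	fake_add(&ev, 0);			/* scheduled on a counter, not yet counting */
	assert(ev.state == (PERF_HES_STOPPED | PERF_HES_UPTODATE));

	fake_start(&ev);
	ev.hw = 42;				/* pretend the PMC ticked 42 times */

	fake_stop(&ev, PERF_EF_UPDATE);
	assert(ev.count == 42 && (ev.state & PERF_HES_UPTODATE));

	printf("count=%ld state=%#x\n", ev.count, ev.state);
	return 0;
}
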
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index d290845aef5..6f7feb5db27 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -48,7 +48,7 @@ SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 	sigset_t mask;
 	unsigned long res;
 
-	siginitset(&mask, newmask & ~_BLOCKABLE);
+	siginitset(&mask, newmask & _BLOCKABLE);
 	res = sigprocmask(how, &mask, &oldmask);
 	if (!res) {
 		force_successful_syscall_return();
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 396af1799ea..0f1d8493cfc 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()		0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifndef CONFIG_SMP
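
The time.c hunks swap the perf-specific pending flag for the generic irq_work machinery: code that needs a callback from a sane interrupt context queues work, the arch hook set_irq_work_pending() marks the per-CPU flag, and the next timer interrupt notices the flag, clears it and calls irq_work_run(). The following is a minimal single-CPU model of that hand-off, for illustration only; fake_irq_work_queue(), fake_irq_work_run() and the single function-pointer "queue" are stand-ins for the real irq_work list, not the kernel API.

#include <stdio.h>

/* Per-CPU pending flag, as in DEFINE_PER_CPU(u8, irq_work_pending) above. */
static unsigned char irq_work_pending;

/* Single-slot stand-in for the real per-CPU irq_work list. */
static void (*queued_work)(void);

/* Arch hook: the generic irq_work code calls this after queueing new work. */
static void set_irq_work_pending(void)
{
	irq_work_pending = 1;
}

/* Stand-in for irq_work_queue(): remember the callback, then poke the arch. */
static void fake_irq_work_queue(void (*fn)(void))
{
	queued_work = fn;
	set_irq_work_pending();
}

/* Stand-in for irq_work_run(): run whatever was queued. */
static void fake_irq_work_run(void)
{
	if (queued_work) {
		void (*fn)(void) = queued_work;

		queued_work = NULL;
		fn();
	}
}

/* Mirrors the tail of timer_interrupt() in the hunk above. */
static void fake_timer_interrupt(void)
{
	if (irq_work_pending) {
		irq_work_pending = 0;		/* clear_irq_work_pending() */
		fake_irq_work_run();		/* irq_work_run() */
	}
}

static void my_callback(void)
{
	puts("irq_work callback ran from the timer tick");
}

int main(void)
{
	fake_irq_work_queue(my_callback);	/* e.g. queued from a PMI handler */
	fake_timer_interrupt();			/* next tick notices the flag and services it */
	return 0;
}
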