author    Michael Cree <mcree@orcon.net.nz>  2010-08-09 20:20:08 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-08-09 23:45:04 -0400
commit    979f867191f80e74713394cf8c0a3c1b3662b648 (patch)
tree      e2483309110e2e285330e7fd2e5f50eb25dced25 /arch/alpha/kernel
parent    92484f10ca8f7d36f0bfad92b66a20aa03120cc0 (diff)
alpha: implement HW performance events on the EV67 and later CPUs
This implements hardware performance events for the EV67 and later CPUs within the Linux performance events subsystem. Only use of the performance monitoring unit in HP/Compaq's so-called "Aggregate mode" is supported. The code has been implemented in a manner that makes extension to other, older Alpha CPUs relatively straightforward should some mug wish to indulge themselves.

Signed-off-by: Michael Cree <mcree@orcon.net.nz>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jay Estabrook <jay.estabrook@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/alpha/kernel')
-rw-r--r--  arch/alpha/kernel/Makefile      |   1
-rw-r--r--  arch/alpha/kernel/irq_alpha.c   |   3
-rw-r--r--  arch/alpha/kernel/perf_event.c  | 842
-rw-r--r--  arch/alpha/kernel/time.c        |  26
4 files changed, 872 insertions, 0 deletions
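[Editorial aside, not part of the patch] Seen from user space, what this commit enables is the standard perf_event_open() interface on EV67-class machines: a HARDWARE-type event such as PERF_COUNT_HW_CPU_CYCLES is mapped by the code below onto the EV67 PMU. A minimal sketch follows, assuming a kernel built with CONFIG_PERF_EVENTS; the perf_event_open() wrapper is our own helper (glibc ships none), and the mode-exclusion bits are deliberately left clear because __hw_perf_event_init() below rejects them with -EPERM.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* Raw syscall wrapper; glibc provides no perf_event_open() of its own. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* maps to EV67_CYCLES below */
	attr.disabled = 1;			/* start stopped; enable explicitly */
	/* Note: no exclude_* bits -- the EV67 cannot do mode exclusion. */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}

The same attr with type set to PERF_TYPE_RAW exercises the RAW path in __hw_perf_event_init(), where config & 0xff is interpreted directly as an ev67_pmc_event_type value.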
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 5a62fb46ef20..1ee9b5b629b8 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SMP) += smp.o
15obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o
16obj-$(CONFIG_SRM_ENV) += srm_env.o
17obj-$(CONFIG_MODULES) += module.o
18obj-$(CONFIG_PERF_EVENTS) += perf_event.o
19
20ifdef CONFIG_ALPHA_GENERIC
21
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index cfde865b78e0..5f77afb88e89 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -10,6 +10,7 @@
10
11#include <asm/machvec.h>
12#include <asm/dma.h>
13#include <asm/perf_event.h>
14
15#include "proto.h"
16#include "irq_impl.h"
@@ -111,6 +112,8 @@ init_IRQ(void)
112 wrent(entInt, 0);
113
114 alpha_mv.init_irq();
115
116 init_hw_perf_events();
117}
118
119/*
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
new file mode 100644
index 000000000000..51c39fa41693
--- /dev/null
+++ b/arch/alpha/kernel/perf_event.c
@@ -0,0 +1,842 @@
1/*
2 * Hardware performance events for the Alpha.
3 *
4 * We implement HW counts on the EV67 and subsequent CPUs only.
5 *
6 * (C) 2010 Michael J. Cree
7 *
8 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
9 * ARM code, which are copyright by their respective authors.
10 */
11
12#include <linux/perf_event.h>
13#include <linux/kprobes.h>
14#include <linux/kernel.h>
15#include <linux/kdebug.h>
16#include <linux/mutex.h>
17
18#include <asm/hwrpb.h>
19#include <asm/atomic.h>
20#include <asm/irq.h>
21#include <asm/irq_regs.h>
22#include <asm/pal.h>
23#include <asm/wrperfmon.h>
24#include <asm/hw_irq.h>
25
26
27/* The maximum number of PMCs on any Alpha CPU whatsoever. */
28#define MAX_HWEVENTS 3
29#define PMC_NO_INDEX -1
30
31/* For tracking PMCs and the hw events they monitor on each CPU. */
32struct cpu_hw_events {
33 int enabled;
34 /* Number of events scheduled; also the number of entries valid in the arrays below. */
35 int n_events;
36 /* Number of events added since the last hw_perf_disable(). */
37 int n_added;
38 /* Events currently scheduled. */
39 struct perf_event *event[MAX_HWEVENTS];
40 /* Event type of each scheduled event. */
41 unsigned long evtype[MAX_HWEVENTS];
42 /* Current index of each scheduled event; if not yet determined
43 * contains PMC_NO_INDEX.
44 */
45 int current_idx[MAX_HWEVENTS];
46 /* The active PMCs' config for easy use with wrperfmon(). */
47 unsigned long config;
48 /* The active counters' indices for easy use with wrperfmon(). */
49 unsigned long idx_mask;
50};
51DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
52
53
54
55/*
56 * A structure to hold the description of the PMCs available on a particular
57 * type of Alpha CPU.
58 */
59struct alpha_pmu_t {
60 /* Mapping of the perf system hw event types to indigenous event types */
61 const int *event_map;
62 /* The number of entries in the event_map */
63 int max_events;
64 /* The number of PMCs on this Alpha */
65 int num_pmcs;
66 /*
67 * All PMC counters reside in the IBOX register PCTR. This is the
68 * bit position of the LSB of each counter within PCTR.
69 */
70 int pmc_count_shift[MAX_HWEVENTS];
71 /*
72 * The mask that isolates the PMC bits when the LSB of the counter
73 * is shifted to bit 0.
74 */
75 unsigned long pmc_count_mask[MAX_HWEVENTS];
76 /* The maximum period the PMC can count. */
77 unsigned long pmc_max_period[MAX_HWEVENTS];
78 /*
79 * The maximum value that may be written to the counter due to
80 * hardware restrictions is pmc_max_period - pmc_left.
81 */
82 long pmc_left[3];
83 /* Subroutine for allocation of PMCs. Enforces constraints. */
84 int (*check_constraints)(struct perf_event **, unsigned long *, int);
85};
86
87/*
88 * The Alpha CPU PMU description currently in operation. This is set during
89 * the boot process to the specific CPU of the machine.
90 */
91static const struct alpha_pmu_t *alpha_pmu;
92
93
94#define HW_OP_UNSUPPORTED -1
95
96/*
97 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
98 * follows. Since they are identical we refer to them collectively as the
99 * EV67 henceforth.
100 */
101
102/*
103 * EV67 PMC event types
104 *
105 * There is no one-to-one mapping of the possible hw event types to the
106 * actual codes that are used to program the PMCs, hence we introduce our
107 * own hw event type identifiers.
108 */
109enum ev67_pmc_event_type {
110 EV67_CYCLES = 1,
111 EV67_INSTRUCTIONS,
112 EV67_BCACHEMISS,
113 EV67_MBOXREPLAY,
114 EV67_LAST_ET
115};
116#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
117
118
119/* Mapping of the hw event types to the perf tool interface */
120static const int ev67_perfmon_event_map[] = {
121 [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
122 [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
123 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
124 [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
125};
126
127struct ev67_mapping_t {
128 int config;
129 int idx;
130};
131
132/*
133 * The mapping used when only one event is scheduled - these must be in the
134 * same order as the enum ev67_pmc_event_type definition.
135 */
136static const struct ev67_mapping_t ev67_mapping[] = {
137 {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */
138 {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */
139 {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
140 {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */
141};
142
143
144/*
145 * Check that a group of events can be simultaneously scheduled on to the
146 * EV67 PMU. Also allocate counter indices and config.
147 */
148static int ev67_check_constraints(struct perf_event **event,
149 unsigned long *evtype, int n_ev)
150{
151 int idx0;
152 unsigned long config;
153
154 idx0 = ev67_mapping[evtype[0]-1].idx;
155 config = ev67_mapping[evtype[0]-1].config;
156 if (n_ev == 1)
157 goto success;
158
159 BUG_ON(n_ev != 2);
160
161 if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
162 /* MBOX replay traps must be on PMC 1 */
163 idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
164 /* Only cycles can accompany MBOX replay traps */
165 if (evtype[idx0] == EV67_CYCLES) {
166 config = EV67_PCTR_CYCLES_MBOX;
167 goto success;
168 }
169 }
170
171 if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
172 /* Bcache misses must be on PMC 1 */
173 idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
174 /* Only instructions can accompany Bcache misses */
175 if (evtype[idx0] == EV67_INSTRUCTIONS) {
176 config = EV67_PCTR_INSTR_BCACHEMISS;
177 goto success;
178 }
179 }
180
181 if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
182 /* Instructions must be on PMC 0 */
183 idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
184 /* By this point only cycles can accompany instructions */
185 if (evtype[idx0^1] == EV67_CYCLES) {
186 config = EV67_PCTR_INSTR_CYCLES;
187 goto success;
188 }
189 }
190
191 /* Otherwise, darn it, there is a conflict. */
192 return -1;
193
194success:
195 event[0]->hw.idx = idx0;
196 event[0]->hw.config_base = config;
197 if (n_ev == 2) {
198 event[1]->hw.idx = idx0 ^ 1;
199 event[1]->hw.config_base = config;
200 }
201 return 0;
202}
203
204
205static const struct alpha_pmu_t ev67_pmu = {
206 .event_map = ev67_perfmon_event_map,
207 .max_events = ARRAY_SIZE(ev67_perfmon_event_map),
208 .num_pmcs = 2,
209 .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
210 .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
211 .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
212 .pmc_left = {16, 4, 0},
213 .check_constraints = ev67_check_constraints
214};
215
216
217
218/*
219 * Helper routines to ensure that we read/write only the correct PMC bits
220 * when calling the wrperfmon PALcall.
221 */
222static inline void alpha_write_pmc(int idx, unsigned long val)
223{
224 val &= alpha_pmu->pmc_count_mask[idx];
225 val <<= alpha_pmu->pmc_count_shift[idx];
226 val |= (1<<idx);
227 wrperfmon(PERFMON_CMD_WRITE, val);
228}
229
230static inline unsigned long alpha_read_pmc(int idx)
231{
232 unsigned long val;
233
234 val = wrperfmon(PERFMON_CMD_READ, 0);
235 val >>= alpha_pmu->pmc_count_shift[idx];
236 val &= alpha_pmu->pmc_count_mask[idx];
237 return val;
238}
239
240/* Set a new period to sample over */
241static int alpha_perf_event_set_period(struct perf_event *event,
242 struct hw_perf_event *hwc, int idx)
243{
244 long left = atomic64_read(&hwc->period_left);
245 long period = hwc->sample_period;
246 int ret = 0;
247
248 if (unlikely(left <= -period)) {
249 left = period;
250 atomic64_set(&hwc->period_left, left);
251 hwc->last_period = period;
252 ret = 1;
253 }
254
255 if (unlikely(left <= 0)) {
256 left += period;
257 atomic64_set(&hwc->period_left, left);
258 hwc->last_period = period;
259 ret = 1;
260 }
261
262 /*
263 * Hardware restrictions require that the counters not be written
264 * with values that are too close to the maximum period.
265 */
266 if (unlikely(left < alpha_pmu->pmc_left[idx]))
267 left = alpha_pmu->pmc_left[idx];
268
269 if (left > (long)alpha_pmu->pmc_max_period[idx])
270 left = alpha_pmu->pmc_max_period[idx];
271
272 atomic64_set(&hwc->prev_count, (unsigned long)(-left));
273
274 alpha_write_pmc(idx, (unsigned long)(-left));
275
276 perf_event_update_userpage(event);
277
278 return ret;
279}
280
281
282/*
283 * Calculates the count (the 'delta') since the last time the PMC was read.
284 *
285 * As the PMCs' full period can easily be exceeded within the perf system
286 * sampling period we cannot use any high order bits as a guard bit in the
287 * PMCs to detect overflow as is done by other architectures. The code here
288 * calculates the delta on the basis that there is no overflow when ovf is
289 * zero. The value passed via ovf by the interrupt handler corrects for
290 * overflow.
291 *
292 * This can be racy on rare occasions -- a call to this routine can occur
293 * with an overflowed counter just before the PMI service routine is called.
294 * The check for a negative delta hopefully always rectifies this situation.
295 */
296static unsigned long alpha_perf_event_update(struct perf_event *event,
297 struct hw_perf_event *hwc, int idx, long ovf)
298{
299 long prev_raw_count, new_raw_count;
300 long delta;
301
302again:
303 prev_raw_count = atomic64_read(&hwc->prev_count);
304 new_raw_count = alpha_read_pmc(idx);
305
306 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
307 new_raw_count) != prev_raw_count)
308 goto again;
309
310 delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
311
312 /* It is possible on very rare occasions that the PMC has overflowed
313 * but the interrupt is yet to come. Detect and fix this situation.
314 */
315 if (unlikely(delta < 0)) {
316 delta += alpha_pmu->pmc_max_period[idx] + 1;
317 }
318
319 atomic64_add(delta, &event->count);
320 atomic64_sub(delta, &hwc->period_left);
321
322 return new_raw_count;
323}
324
325
326/*
327 * Collect all HW events into the array event[].
328 */
329static int collect_events(struct perf_event *group, int max_count,
330 struct perf_event *event[], unsigned long *evtype,
331 int *current_idx)
332{
333 struct perf_event *pe;
334 int n = 0;
335
336 if (!is_software_event(group)) {
337 if (n >= max_count)
338 return -1;
339 event[n] = group;
340 evtype[n] = group->hw.event_base;
341 current_idx[n++] = PMC_NO_INDEX;
342 }
343 list_for_each_entry(pe, &group->sibling_list, group_entry) {
344 if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
345 if (n >= max_count)
346 return -1;
347 event[n] = pe;
348 evtype[n] = pe->hw.event_base;
349 current_idx[n++] = PMC_NO_INDEX;
350 }
351 }
352 return n;
353}
354
355
356
357/*
358 * Check that a group of events can be simultaneously scheduled on to the PMU.
359 */
360static int alpha_check_constraints(struct perf_event **events,
361 unsigned long *evtypes, int n_ev)
362{
363
364 /* No HW events is possible from hw_perf_group_sched_in(). */
365 if (n_ev == 0)
366 return 0;
367
368 if (n_ev > alpha_pmu->num_pmcs)
369 return -1;
370
371 return alpha_pmu->check_constraints(events, evtypes, n_ev);
372}
373
374
375/*
376 * If new events have been scheduled then update cpuc with the new
377 * configuration. This may involve shifting cycle counts from one PMC to
378 * another.
379 */
380static void maybe_change_configuration(struct cpu_hw_events *cpuc)
381{
382 int j;
383
384 if (cpuc->n_added == 0)
385 return;
386
387 /* Find counters that are moving to another PMC and update */
388 for (j = 0; j < cpuc->n_events; j++) {
389 struct perf_event *pe = cpuc->event[j];
390
391 if (cpuc->current_idx[j] != PMC_NO_INDEX &&
392 cpuc->current_idx[j] != pe->hw.idx) {
393 alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
394 cpuc->current_idx[j] = PMC_NO_INDEX;
395 }
396 }
397
398 /* Assign to counters all unassigned events. */
399 cpuc->idx_mask = 0;
400 for (j = 0; j < cpuc->n_events; j++) {
401 struct perf_event *pe = cpuc->event[j];
402 struct hw_perf_event *hwc = &pe->hw;
403 int idx = hwc->idx;
404
405 if (cpuc->current_idx[j] != PMC_NO_INDEX) {
406 cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
407 continue;
408 }
409
410 alpha_perf_event_set_period(pe, hwc, idx);
411 cpuc->current_idx[j] = idx;
412 cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
413 }
414 cpuc->config = cpuc->event[0]->hw.config_base;
415}
416
417
418
419/* Schedule perf HW event on to PMU.
420 * - this function is called from outside this module via the pmu struct
421 * returned from perf event initialisation.
422 */
423static int alpha_pmu_enable(struct perf_event *event)
424{
425 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
426 int n0;
427 int ret;
428 unsigned long flags;
429
430 /*
431 * The Sparc code has the IRQ disable first followed by the perf
432 * disable; however, this can lead to an overflowed counter with the
433 * PMI disabled on rare occasions. The alpha_perf_event_update()
434 * routine should detect this situation by noting a negative delta;
435 * nevertheless, we disable the PMCs first to allow a potential
436 * final PMI to occur before we disable interrupts.
437 */
438 perf_disable();
439 local_irq_save(flags);
440
441 /* Default to error to be returned */
442 ret = -EAGAIN;
443
444 /* Insert event on to PMU and if successful modify ret to valid return */
445 n0 = cpuc->n_events;
446 if (n0 < alpha_pmu->num_pmcs) {
447 cpuc->event[n0] = event;
448 cpuc->evtype[n0] = event->hw.event_base;
449 cpuc->current_idx[n0] = PMC_NO_INDEX;
450
451 if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
452 cpuc->n_events++;
453 cpuc->n_added++;
454 ret = 0;
455 }
456 }
457
458 local_irq_restore(flags);
459 perf_enable();
460
461 return ret;
462}
463
464
465
466/* Disable performance monitoring unit
467 * - this function is called from outside this module via the pmu struct
468 * returned from perf event initialisation.
469 */
470static void alpha_pmu_disable(struct perf_event *event)
471{
472 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
473 struct hw_perf_event *hwc = &event->hw;
474 unsigned long flags;
475 int j;
476
477 perf_disable();
478 local_irq_save(flags);
479
480 for (j = 0; j < cpuc->n_events; j++) {
481 if (event == cpuc->event[j]) {
482 int idx = cpuc->current_idx[j];
483
484 /* Shift remaining entries down into the existing
485 * slot.
486 */
487 while (++j < cpuc->n_events) {
488 cpuc->event[j - 1] = cpuc->event[j];
489 cpuc->evtype[j - 1] = cpuc->evtype[j];
490 cpuc->current_idx[j - 1] =
491 cpuc->current_idx[j];
492 }
493
494 /* Absorb the final count and turn off the event. */
495 alpha_perf_event_update(event, hwc, idx, 0);
496 perf_event_update_userpage(event);
497
498 cpuc->idx_mask &= ~(1UL<<idx);
499 cpuc->n_events--;
500 break;
501 }
502 }
503
504 local_irq_restore(flags);
505 perf_enable();
506}
507
508
509static void alpha_pmu_read(struct perf_event *event)
510{
511 struct hw_perf_event *hwc = &event->hw;
512
513 alpha_perf_event_update(event, hwc, hwc->idx, 0);
514}
515
516
517static void alpha_pmu_unthrottle(struct perf_event *event)
518{
519 struct hw_perf_event *hwc = &event->hw;
520 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
521
522 cpuc->idx_mask |= 1UL<<hwc->idx;
523 wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
524}
525
526
527/*
528 * Check that CPU performance counters are supported.
529 * - currently support EV67 and later CPUs.
530 * - actually some later revisions of the EV6 have the same PMC model as the
531 * EV67 but we don't do sufficiently deep CPU detection to detect them.
532 * Bad luck to the very few people who might have one, I guess.
533 */
534static int supported_cpu(void)
535{
536 struct percpu_struct *cpu;
537 unsigned long cputype;
538
539 /* Get cpu type from HW */
540 cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
541 cputype = cpu->type & 0xffffffff;
542 /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
543 return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
544}
545
546
547
548static void hw_perf_event_destroy(struct perf_event *event)
549{
550 /* Nothing to be done! */
551 return;
552}
553
554
555
556static int __hw_perf_event_init(struct perf_event *event)
557{
558 struct perf_event_attr *attr = &event->attr;
559 struct hw_perf_event *hwc = &event->hw;
560 struct perf_event *evts[MAX_HWEVENTS];
561 unsigned long evtypes[MAX_HWEVENTS];
562 int idx_rubbish_bin[MAX_HWEVENTS];
563 int ev;
564 int n;
565
566 /* We only support a limited range of HARDWARE event types, with one
567 * programmable only via a RAW event type.
568 */
569 if (attr->type == PERF_TYPE_HARDWARE) {
570 if (attr->config >= alpha_pmu->max_events)
571 return -EINVAL;
572 ev = alpha_pmu->event_map[attr->config];
573 } else if (attr->type == PERF_TYPE_HW_CACHE) {
574 return -EOPNOTSUPP;
575 } else if (attr->type == PERF_TYPE_RAW) {
576 ev = attr->config & 0xff;
577 } else {
578 return -EOPNOTSUPP;
579 }
580
581 if (ev < 0) {
582 return ev;
583 }
584
585 /* The EV67 does not support mode exclusion */
586 if (attr->exclude_kernel || attr->exclude_user
587 || attr->exclude_hv || attr->exclude_idle) {
588 return -EPERM;
589 }
590
591 /*
592 * We place the event type in event_base here and leave calculation
593 * of the codes to programme the PMU for alpha_pmu_enable() because
594 * it is only then that we will know what HW events are actually
595 * scheduled on to the PMU. At that point the code to programme the
596 * PMU is put into config_base and the PMC to use is placed into
597 * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
598 * it is yet to be determined.
599 */
600 hwc->event_base = ev;
601
602 /* Collect events in a group together suitable for calling
603 * alpha_check_constraints() to verify that the group as a whole can
604 * be scheduled on to the PMU.
605 */
606 n = 0;
607 if (event->group_leader != event) {
608 n = collect_events(event->group_leader,
609 alpha_pmu->num_pmcs - 1,
610 evts, evtypes, idx_rubbish_bin);
611 if (n < 0)
612 return -EINVAL;
613 }
614 evtypes[n] = hwc->event_base;
615 evts[n] = event;
616
617 if (alpha_check_constraints(evts, evtypes, n + 1))
618 return -EINVAL;
619
620 /* Indicate that PMU config and idx are yet to be determined. */
621 hwc->config_base = 0;
622 hwc->idx = PMC_NO_INDEX;
623
624 event->destroy = hw_perf_event_destroy;
625
626 /*
627 * Most architectures reserve the PMU for their use at this point.
628 * As there is no existing mechanism to arbitrate usage and there
629 * appears to be no other user of the Alpha PMU we simply assume
630 * that we can use it, hence a NO-OP here.
631 *
632 * Maybe an alpha_reserve_pmu() routine should be implemented but is
633 * anything else ever going to use it?
634 */
635
636 if (!hwc->sample_period) {
637 hwc->sample_period = alpha_pmu->pmc_max_period[0];
638 hwc->last_period = hwc->sample_period;
639 atomic64_set(&hwc->period_left, hwc->sample_period);
640 }
641
642 return 0;
643}
644
645static const struct pmu pmu = {
646 .enable = alpha_pmu_enable,
647 .disable = alpha_pmu_disable,
648 .read = alpha_pmu_read,
649 .unthrottle = alpha_pmu_unthrottle,
650};
651
652
653/*
654 * Main entry point to initialise a HW performance event.
655 */
656const struct pmu *hw_perf_event_init(struct perf_event *event)
657{
658 int err;
659
660 if (!alpha_pmu)
661 return ERR_PTR(-ENODEV);
662
663 /* Do the real initialisation work. */
664 err = __hw_perf_event_init(event);
665
666 if (err)
667 return ERR_PTR(err);
668
669 return &pmu;
670}
671
672
673
674/*
675 * Main entry point - enable HW performance counters.
676 */
677void hw_perf_enable(void)
678{
679 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
680
681 if (cpuc->enabled)
682 return;
683
684 cpuc->enabled = 1;
685 barrier();
686
687 if (cpuc->n_events > 0) {
688 /* Update cpuc with information from any newly scheduled events. */
689 maybe_change_configuration(cpuc);
690
691 /* Start counting the desired events. */
692 wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
693 wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
694 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
695 }
696}
697
698
699/*
700 * Main entry point - disable HW performance counters.
701 */
702
703void hw_perf_disable(void)
704{
705 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
706
707 if (!cpuc->enabled)
708 return;
709
710 cpuc->enabled = 0;
711 cpuc->n_added = 0;
712
713 wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
714}
715
716
717/*
718 * Main entry point - dumps the current raw PMC values for debugging
719 * purposes.
720 */
721void perf_event_print_debug(void)
722{
723 unsigned long flags;
724 unsigned long pcr;
725 int pcr0, pcr1;
726 int cpu;
727
728 if (!supported_cpu())
729 return;
730
731 local_irq_save(flags);
732
733 cpu = smp_processor_id();
734
735 pcr = wrperfmon(PERFMON_CMD_READ, 0);
736 pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
737 pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
738
739 pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
740
741 local_irq_restore(flags);
742}
743
744
745/*
746 * Performance Monitoring Interrupt Service Routine called when a PMC
747 * overflows. The PMC that overflowed is passed in la_ptr.
748 */
749static void alpha_perf_event_irq_handler(unsigned long la_ptr,
750 struct pt_regs *regs)
751{
752 struct cpu_hw_events *cpuc;
753 struct perf_sample_data data;
754 struct perf_event *event;
755 struct hw_perf_event *hwc;
756 int idx, j;
757
758 __get_cpu_var(irq_pmi_count)++;
759 cpuc = &__get_cpu_var(cpu_hw_events);
760
761 /* Completely counting through the PMC's period to trigger a new PMC
762 * overflow interrupt while in this interrupt routine is utterly
763 * disastrous! The EV6 and EV67 counters are sufficiently large to
764 * prevent this but to be really sure disable the PMCs.
765 */
766 wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
767
768 /* la_ptr is the counter that overflowed. */
769 if (unlikely(la_ptr >= perf_max_events)) {
770 /* This should never occur! */
771 irq_err_count++;
772 pr_warning("PMI: silly index %ld\n", la_ptr);
773 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
774 return;
775 }
776
777 idx = la_ptr;
778
779 perf_sample_data_init(&data, 0);
780 for (j = 0; j < cpuc->n_events; j++) {
781 if (cpuc->current_idx[j] == idx)
782 break;
783 }
784
785 if (unlikely(j == cpuc->n_events)) {
786 /* This can occur if the event is disabled right on a PMC overflow. */
787 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
788 return;
789 }
790
791 event = cpuc->event[j];
792
793 if (unlikely(!event)) {
794 /* This should never occur! */
795 irq_err_count++;
796 pr_warning("PMI: No event at index %d!\n", idx);
797 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
798 return;
799 }
800
801 hwc = &event->hw;
802 alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
803 data.period = event->hw.last_period;
804
805 if (alpha_perf_event_set_period(event, hwc, idx)) {
806 if (perf_event_overflow(event, 1, &data, regs)) {
807 /* Interrupts coming too quickly; "throttle" the
808 * counter, i.e., disable it for a little while.
809 */
810 cpuc->idx_mask &= ~(1UL<<idx);
811 }
812 }
813 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
814
815 return;
816}
817
818
819
820/*
821 * Init call to initialise performance events at kernel startup.
822 */
823void __init init_hw_perf_events(void)
824{
825 pr_info("Performance events: ");
826
827 if (!supported_cpu()) {
828 pr_cont("No support for your CPU.\n");
829 return;
830 }
831
832 pr_cont("Supported CPU type!\n");
833
834 /* Override performance counter IRQ vector */
835
836 perf_irq = alpha_perf_event_irq_handler;
837
838 /* And set up PMU specification */
839 alpha_pmu = &ev67_pmu;
840 perf_max_events = alpha_pmu->num_pmcs;
841}
842
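[Editorial aside, not part of the patch] ev67_check_constraints() above only admits particular pairings on the two PMCs -- for example instructions on PMC 0 together with cycles on PMC 1 (config EV67_PCTR_INSTR_CYCLES). A hedged user-space sketch of scheduling such a pair as a single perf event group, under the same assumptions as the earlier example (our own perf_event_open() wrapper, CONFIG_PERF_EVENTS enabled):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	struct { uint64_t nr; uint64_t val[2]; } res;	/* PERF_FORMAT_GROUP layout */
	int leader, sibling;

	/* Group leader: instructions (lands on PMC 0 per ev67_check_constraints). */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_GROUP;	/* one read() returns both counts */
	attr.disabled = 1;
	leader = perf_event_open(&attr, 0, -1, -1, 0);

	/* Sibling: cycles (lands on PMC 1); scheduled together with its leader. */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.read_format = 0;
	attr.disabled = 0;
	sibling = perf_event_open(&attr, 0, -1, leader, 0);
	if (leader < 0 || sibling < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	/* ... workload to be measured ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	if (read(leader, &res, sizeof(res)) > 0)
		printf("instructions: %llu  cycles: %llu\n",
		       (unsigned long long)res.val[0],
		       (unsigned long long)res.val[1]);

	close(sibling);
	close(leader);
	return 0;
}

An incompatible pairing (for instance two events that both have to sit on PMC 1) makes alpha_check_constraints() fail, so the group creation returns -EINVAL -- the conflict path that returns -1 in ev67_check_constraints() above.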
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 1efbed82c0fd..eacceb26d9c8 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -41,6 +41,7 @@
41#include <linux/init.h>
42#include <linux/bcd.h>
43#include <linux/profile.h>
44#include <linux/perf_event.h>
45
46#include <asm/uaccess.h>
47#include <asm/io.h>
@@ -82,6 +83,26 @@ static struct {
83
84unsigned long est_cycle_freq;
85
86#ifdef CONFIG_PERF_EVENTS
87
88DEFINE_PER_CPU(u8, perf_event_pending);
89
90#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
91#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
92#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
93
94void set_perf_event_pending(void)
95{
96 set_perf_event_pending_flag();
97}
98
99#else /* CONFIG_PERF_EVENTS */
100
101#define test_perf_event_pending() 0
102#define clear_perf_event_pending()
103
104#endif /* CONFIG_PERF_EVENTS */
105
106
107static inline __u32 rpcc(void)
108{
@@ -175,6 +196,11 @@ irqreturn_t timer_interrupt(int irq, void *dev)
196 update_process_times(user_mode(get_irq_regs()));
197#endif
198
199 if (test_perf_event_pending()) {
200 clear_perf_event_pending();
201 perf_event_do_pending();
202 }
203
204 return IRQ_HANDLED;
205}
206