author		Mark Rutland <mark.rutland@arm.com>	2011-04-28 11:27:54 -0400
committer	Will Deacon <will.deacon@arm.com>	2011-08-31 05:50:10 -0400
commit		8a16b34e21199eb5fcf2c5050d3bc414fc5d6563 (patch)
tree		40a6c739285b7c1f389bf5597d9ae746080c0736 /arch
parent		e1f431b57ef9e4a68281540933fa74865cbb7a74 (diff)
ARM: perf: add support for multiple PMUs
Currently, a single static instance of struct pmu is used when registering an
ARM PMU with the main perf subsystem. This limits the ARM perf code to
supporting a single PMU.

This patch replaces the static struct pmu instance with a member variable on
struct arm_pmu. This provides bidirectional mapping between the two structs,
and therefore allows for support of multiple PMUs. The function 'to_arm_pmu'
is provided for convenience.

PMU-generic functions are also updated to use the new mapping, and PMU-generic
initialisation of the member variables is moved into a new function:
armpmu_init.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
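The idiom the commit message describes is the embed-plus-container_of pattern: the generic struct pmu lives inside the hardware-specific struct arm_pmu, and to_arm_pmu() recovers the outer struct from the struct pmu pointer handed back by the perf core, so each PMU instance carries its own callbacks and state. The following is a minimal, standalone userspace sketch of that pattern, not kernel code: the simplified struct layouts, example_event_init() and the "example-*" names are invented for illustration only; the real structures and callbacks are those in the diff below.

/* Standalone sketch (compile with any C compiler) of embedding a generic
 * struct inside a device-specific one and mapping back with container_of. */
#include <stddef.h>	/* offsetof */
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct pmu {
	int (*event_init)(struct pmu *pmu);
};

struct arm_pmu {
	struct pmu pmu;		/* embedded generic PMU */
	const char *name;	/* hardware-specific state */
	int num_events;
};

/* Same shape as the kernel helper: recover struct arm_pmu * from struct pmu *. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* A generic callback only receives struct pmu *, yet can still reach the
 * hardware-specific state through the embedded member. */
static int example_event_init(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	printf("init on %s (%d counters)\n", armpmu->name, armpmu->num_events);
	return 0;
}

int main(void)
{
	/* Two independent PMU instances; no shared static struct pmu needed. */
	struct arm_pmu cpu_pmu = {
		.pmu = { .event_init = example_event_init },
		.name = "example-cpu-pmu",
		.num_events = 6,
	};
	struct arm_pmu l2_pmu = {
		.pmu = { .event_init = example_event_init },
		.name = "example-l2-pmu",
		.num_events = 4,
	};

	cpu_pmu.pmu.event_init(&cpu_pmu.pmu);
	l2_pmu.pmu.event_init(&l2_pmu.pmu);
	return 0;
}

Because each struct arm_pmu now owns its struct pmu, registering a second PMU is just a second instance passed to perf_pmu_register(), which is exactly what the new armpmu_register() helper in the diff does.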
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/perf_event.c	63
1 file changed, 41 insertions(+), 22 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index b13bf23ceba3..7f31eff00b80 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,6 +59,7 @@ struct cpu_hw_events {
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct arm_pmu {
+	struct pmu	pmu;
 	enum arm_perf_pmu_ids id;
 	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
@@ -84,6 +85,8 @@ struct arm_pmu {
 	struct cpu_hw_events	*(*get_hw_events)(void);
 };
 
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *armpmu;
 
@@ -193,6 +196,7 @@ armpmu_event_set_period(struct perf_event *event,
 			struct hw_perf_event *hwc,
 			int idx)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
@@ -228,6 +232,7 @@ armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
 		    int idx, int overflow)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
 
 again:
@@ -267,6 +272,7 @@ armpmu_read(struct perf_event *event)
 static void
 armpmu_stop(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 
 	/*
@@ -284,6 +290,7 @@ armpmu_stop(struct perf_event *event, int flags)
 static void
 armpmu_start(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 
 	/*
@@ -308,6 +315,7 @@ armpmu_start(struct perf_event *event, int flags)
 static void
 armpmu_del(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
@@ -324,6 +332,7 @@ armpmu_del(struct perf_event *event, int flags)
 static int
 armpmu_add(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
@@ -358,12 +367,11 @@ out:
 	return err;
 }
 
-static struct pmu pmu;
-
 static int
 validate_event(struct cpu_hw_events *cpuc,
 	       struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
@@ -397,6 +405,7 @@ validate_group(struct perf_event *event)
 
 static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 {
+	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
 	struct platform_device *plat_device = armpmu->plat_device;
 	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
 
@@ -404,7 +413,7 @@ static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 }
 
 static void
-armpmu_release_hardware(void)
+armpmu_release_hardware(struct arm_pmu *armpmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
@@ -416,14 +425,14 @@ armpmu_release_hardware(void)
 			continue;
 		irq = platform_get_irq(pmu_device, i);
 		if (irq >= 0)
-			free_irq(irq, NULL);
+			free_irq(irq, armpmu);
 	}
 
 	release_pmu(armpmu->type);
 }
 
 static int
-armpmu_reserve_hardware(void)
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
 	struct arm_pmu_platdata *plat;
 	irq_handler_t handle_irq;
@@ -467,11 +476,11 @@ armpmu_reserve_hardware(void)
 
 		err = request_irq(irq, handle_irq,
 				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "arm-pmu", NULL);
+				  "arm-pmu", armpmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
-			armpmu_release_hardware();
+			armpmu_release_hardware(armpmu);
 			return err;
 		}
 
@@ -484,11 +493,12 @@ armpmu_reserve_hardware(void)
 static void
 hw_perf_event_destroy(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	atomic_t *active_events = &armpmu->active_events;
 	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
 
 	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
-		armpmu_release_hardware();
+		armpmu_release_hardware(armpmu);
 		mutex_unlock(pmu_reserve_mutex);
 	}
 }
@@ -503,6 +513,7 @@ event_requires_mode_exclusion(struct perf_event_attr *attr)
 static int
 __hw_perf_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;
 
@@ -559,6 +570,7 @@ __hw_perf_event_init(struct perf_event *event)
 
 static int armpmu_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
@@ -570,7 +582,7 @@ static int armpmu_event_init(struct perf_event *event)
 	if (!atomic_inc_not_zero(active_events)) {
 		mutex_lock(&armpmu->reserve_mutex);
 		if (atomic_read(active_events) == 0)
-			err = armpmu_reserve_hardware();
+			err = armpmu_reserve_hardware(armpmu);
 
 		if (!err)
 			atomic_inc(active_events);
@@ -589,6 +601,7 @@ static int armpmu_event_init(struct perf_event *event)
 
 static void armpmu_enable(struct pmu *pmu)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	/* Enable all of the perf events on hardware. */
 	int idx, enabled = 0;
 	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
@@ -609,24 +622,31 @@ static void armpmu_enable(struct pmu *pmu)
 
 static void armpmu_disable(struct pmu *pmu)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	armpmu->stop();
 }
 
-static struct pmu pmu = {
-	.pmu_enable	= armpmu_enable,
-	.pmu_disable	= armpmu_disable,
-	.event_init	= armpmu_event_init,
-	.add		= armpmu_add,
-	.del		= armpmu_del,
-	.start		= armpmu_start,
-	.stop		= armpmu_stop,
-	.read		= armpmu_read,
-};
-
 static void __init armpmu_init(struct arm_pmu *armpmu)
 {
 	atomic_set(&armpmu->active_events, 0);
 	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+	};
+}
+
+static int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
 }
 
 /* Include the PMU-specific implementations. */
@@ -751,8 +771,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			armpmu->name, armpmu->num_events);
 		cpu_pmu_init(armpmu);
-		armpmu_init(armpmu);
-		perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+		armpmu_register(armpmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}