author		Hoan Tran <hotran@apm.com>	2017-06-22 14:26:04 -0400
committer	Will Deacon <will.deacon@arm.com>	2017-06-22 15:16:18 -0400
commit		e35e0a04a9965bcb4fffe4375baaecd408ad2357 (patch)
tree		6ccb568a1bebe6955be0538099f32711acbbc655
parent		838955e2a3c010aff9089fd705ae2cd5638cdee8 (diff)
perf: xgene: Move PMU leaf functions into function pointer structure
This patch moves the PMU leaf functions into a function pointer structure.
It helps make the code easier to maintain and extend.

Signed-off-by: Hoan Tran <hotran@apm.com>
[Mark: remove redundant cast]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
[will: make xgene_pmu_ops static]
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	drivers/perf/xgene_pmu.c	83
1 file changed, 65 insertions(+), 18 deletions(-)
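The change introduces the common kernel "ops table" idiom: callers dispatch
through a per-instance structure of function pointers instead of calling the
leaf helpers directly, so an alternative implementation can be selected at
probe time without touching any caller. (The rename of the helpers to
xgene_pmu_read_counter32()/xgene_pmu_write_counter32() and the u64 prototypes
in the ops struct suggest a 64-bit counter variant is anticipated.) A minimal
standalone sketch of the idiom follows; every name in it is a hypothetical
illustration, not code from this patch:

#include <stdio.h>

struct counter_dev;

/* Per-implementation table of leaf operations, analogous to
 * struct xgene_pmu_ops in the patch. */
struct counter_ops {
	unsigned long long (*read_counter)(struct counter_dev *dev, int idx);
	void (*reset_counters)(struct counter_dev *dev);
};

struct counter_dev {
	const struct counter_ops *ops;	/* selected once at probe time */
	unsigned long long regs[4];	/* stand-in for hardware counters */
};

/* One concrete implementation; a second (say, a 64-bit variant) could be
 * added later without modifying any caller. */
static unsigned long long v1_read_counter(struct counter_dev *dev, int idx)
{
	return dev->regs[idx];
}

static void v1_reset_counters(struct counter_dev *dev)
{
	for (int i = 0; i < 4; i++)
		dev->regs[i] = 0;
}

static const struct counter_ops v1_ops = {
	.read_counter	= v1_read_counter,
	.reset_counters	= v1_reset_counters,
};

int main(void)
{
	struct counter_dev dev = { .ops = &v1_ops, .regs = { 7, 0, 0, 0 } };

	/* Callers go through the ops table, mirroring calls such as
	 * xgene_pmu->ops->read_counter(...) in the patch below. */
	printf("counter 0 = %llu\n", dev.ops->read_counter(&dev, 0));
	dev.ops->reset_counters(&dev);
	printf("counter 0 after reset = %llu\n", dev.ops->read_counter(&dev, 0));
	return 0;
}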
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 5ffd58028a62..53e9e31053d6 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -96,6 +96,23 @@ struct xgene_pmu_dev {
 	struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
 };
 
+struct xgene_pmu_ops {
+	void (*mask_int)(struct xgene_pmu *pmu);
+	void (*unmask_int)(struct xgene_pmu *pmu);
+	u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
+	void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
+	void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
+	void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
+	void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
+	void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
+	void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
+	void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
+	void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
+	void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
+	void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
+	void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
+};
+
 struct xgene_pmu {
 	struct device *dev;
 	int version;
@@ -104,6 +121,7 @@ struct xgene_pmu {
 	u32 mc_active_mask;
 	cpumask_t cpu;
 	raw_spinlock_t lock;
+	const struct xgene_pmu_ops *ops;
 	struct list_head l3cpmus;
 	struct list_head iobpmus;
 	struct list_head mcbpmus;
@@ -392,13 +410,14 @@ static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
 	writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
 }
 
-static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
+					   int idx)
 {
 	return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
 }
 
 static inline void
-xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
 {
 	writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
 }
@@ -491,20 +510,22 @@ static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
 static void xgene_perf_pmu_enable(struct pmu *pmu)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 	int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
 			pmu_dev->max_counters);
 
 	if (!enabled)
 		return;
 
-	xgene_pmu_start_counters(pmu_dev);
+	xgene_pmu->ops->start_counters(pmu_dev);
 }
 
 static void xgene_perf_pmu_disable(struct pmu *pmu)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 
-	xgene_pmu_stop_counters(pmu_dev);
+	xgene_pmu->ops->stop_counters(pmu_dev);
 }
 
 static int xgene_perf_event_init(struct perf_event *event)
@@ -572,27 +593,32 @@ static int xgene_perf_event_init(struct perf_event *event)
 static void xgene_perf_enable_event(struct perf_event *event)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 
-	xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event));
-	xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
+	xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
+				      GET_EVENTID(event));
+	xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
 	if (pmu_dev->inf->type == PMU_TYPE_IOB)
-		xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event)));
+		xgene_pmu->ops->write_agent1msk(pmu_dev,
+						~((u32)GET_AGENT1ID(event)));
 
-	xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event));
-	xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event));
+	xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
+	xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
 }
 
 static void xgene_perf_disable_event(struct perf_event *event)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 
-	xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event));
-	xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event));
+	xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
+	xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
 }
 
 static void xgene_perf_event_set_period(struct perf_event *event)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 	struct hw_perf_event *hw = &event->hw;
 	/*
 	 * The X-Gene PMU counters have a period of 2^32. To account for the
@@ -603,18 +629,19 @@ static void xgene_perf_event_set_period(struct perf_event *event)
 	u64 val = 1ULL << 31;
 
 	local64_set(&hw->prev_count, val);
-	xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val);
+	xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
 }
 
 static void xgene_perf_event_update(struct perf_event *event)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 	struct hw_perf_event *hw = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
 
 again:
 	prev_raw_count = local64_read(&hw->prev_count);
-	new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event));
+	new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));
 
 	if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
@@ -633,6 +660,7 @@ static void xgene_perf_read(struct perf_event *event)
 static void xgene_perf_start(struct perf_event *event, int flags)
 {
 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
 	struct hw_perf_event *hw = &event->hw;
 
 	if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
@@ -646,8 +674,8 @@ static void xgene_perf_start(struct perf_event *event, int flags)
 	if (flags & PERF_EF_RELOAD) {
 		u64 prev_raw_count = local64_read(&hw->prev_count);
 
-		xgene_pmu_write_counter(pmu_dev, GET_CNTR(event),
-					(u32) prev_raw_count);
+		xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
+					      prev_raw_count);
 	}
 
 	xgene_perf_enable_event(event);
@@ -736,8 +764,8 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
 	};
 
 	/* Hardware counter init */
-	xgene_pmu_stop_counters(pmu_dev);
-	xgene_pmu_reset_counters(pmu_dev);
+	xgene_pmu->ops->stop_counters(pmu_dev);
+	xgene_pmu->ops->reset_counters(pmu_dev);
 
 	return perf_pmu_register(&pmu_dev->pmu, name, -1);
 }
@@ -1255,6 +1283,23 @@ static const struct xgene_pmu_data xgene_pmu_v2_data = {
 	.id = PCP_PMU_V2,
 };
 
+static const struct xgene_pmu_ops xgene_pmu_ops = {
+	.mask_int = xgene_pmu_mask_int,
+	.unmask_int = xgene_pmu_unmask_int,
+	.read_counter = xgene_pmu_read_counter32,
+	.write_counter = xgene_pmu_write_counter32,
+	.write_evttype = xgene_pmu_write_evttype,
+	.write_agentmsk = xgene_pmu_write_agentmsk,
+	.write_agent1msk = xgene_pmu_write_agent1msk,
+	.enable_counter = xgene_pmu_enable_counter,
+	.disable_counter = xgene_pmu_disable_counter,
+	.enable_counter_int = xgene_pmu_enable_counter_int,
+	.disable_counter_int = xgene_pmu_disable_counter_int,
+	.reset_counters = xgene_pmu_reset_counters,
+	.start_counters = xgene_pmu_start_counters,
+	.stop_counters = xgene_pmu_stop_counters,
+};
+
 static const struct of_device_id xgene_pmu_of_match[] = {
 	{ .compatible = "apm,xgene-pmu",	.data = &xgene_pmu_data },
 	{ .compatible = "apm,xgene-pmu-v2",	.data = &xgene_pmu_v2_data },
@@ -1304,6 +1349,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
 	if (version < 0)
 		return -ENODEV;
 
+	xgene_pmu->ops = &xgene_pmu_ops;
+
 	INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
 	INIT_LIST_HEAD(&xgene_pmu->iobpmus);
 	INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
@@ -1362,7 +1409,7 @@ static int xgene_pmu_probe(struct platform_device *pdev)
 	}
 
 	/* Enable interrupt */
-	xgene_pmu_unmask_int(xgene_pmu);
+	xgene_pmu->ops->unmask_int(xgene_pmu);
 
 	return 0;
 