Diffstat (limited to 'drivers/bus')
 drivers/bus/arm-ccn.c | 273 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 216 insertions(+), 57 deletions(-)
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index aaa0f2a87118..df5f307f3244 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -166,13 +166,17 @@ struct arm_ccn_dt {
 
 	struct hrtimer hrtimer;
 
+	cpumask_t cpu;
+	struct notifier_block cpu_nb;
+
 	struct pmu pmu;
 };
 
 struct arm_ccn {
 	struct device *dev;
 	void __iomem *base;
-	unsigned irq_used:1;
+	unsigned int irq;
+
 	unsigned sbas_present:1;
 	unsigned sbsx_present:1;
 
@@ -212,7 +216,7 @@ static int arm_ccn_node_to_xp_port(int node)
 
 static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
 {
-	*config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
+	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
 	*config |= (node_xp << 0) | (type << 8) | (port << 24);
 }
 
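A stand-alone illustration (not part of the patch) of why the port mask shrinks from 0xff to 0x3, assuming the config word layout declared by this driver's format attributes (node/xp in config[0:7], type in config[8:15], port in config[24:25], vc in config[26:28]; the layout is not shown in this hunk): the old eight-bit mask spanned config[24:31] and so wiped the neighbouring vc/dir fields whenever the port was (re)set.

	#include <stdint.h>
	#include <stdio.h>

	/* Same masking logic as the fixed arm_ccn_pmu_config_set() */
	static void config_set(uint64_t *config, uint32_t node_xp,
			       uint32_t type, uint32_t port)
	{
		*config &= ~((0xffull << 0) | (0xffull << 8) | (0x3ull << 24));
		*config |= ((uint64_t)node_xp << 0) | ((uint64_t)type << 8) |
			   ((uint64_t)port << 24);
	}

	int main(void)
	{
		uint64_t config = 5ull << 26;	/* pretend vc=5 is already set */

		config_set(&config, 1, 0x08, 2);	/* hypothetical values */
		/* With the old 0xff mask, vc would have been cleared here */
		printf("vc survived: %s\n",
		       ((config >> 26) & 0x7) == 5 ? "yes" : "no");
		return 0;
	}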
@@ -336,6 +340,23 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
 	if (event->mask)
 		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
 				event->mask);
+
+	/* Arguments required by an event */
+	switch (event->type) {
+	case CCN_TYPE_CYCLES:
+		break;
+	case CCN_TYPE_XP:
+		res += snprintf(buf + res, PAGE_SIZE - res,
+				",xp=?,port=?,vc=?,dir=?");
+		if (event->event == CCN_EVENT_WATCHPOINT)
+			res += snprintf(buf + res, PAGE_SIZE - res,
+					",cmp_l=?,cmp_h=?,mask=?");
+		break;
+	default:
+		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
+		break;
+	}
+
 	res += snprintf(buf + res, PAGE_SIZE - res, "\n");
 
 	return res;
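The effect shows up when reading an event's attribute from sysfs: the "?" placeholders tell the user which config arguments still have to be supplied on the perf command line. A hypothetical XP watchpoint event, for instance, might now read back as:

	type=0x8,event=0xfe,xp=?,port=?,vc=?,dir=?,cmp_l=?,cmp_h=?,mask=?

(illustrative values only; type and event come from the driver's event table, and non-XP, non-cycles events get ",node=?" appended instead).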
@@ -521,6 +542,25 @@ static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
 	.attrs = arm_ccn_pmu_cmp_mask_attrs,
 };
 
+static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+
+	return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
+}
+
+static struct device_attribute arm_ccn_pmu_cpumask_attr =
+		__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);
+
+static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
+	&arm_ccn_pmu_cpumask_attr.attr,
+	NULL,
+};
+
+static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
+	.attrs = arm_ccn_pmu_cpumask_attrs,
+};
 
 /*
  * Default poll period is 10ms, which is way over the top anyway,
@@ -542,6 +582,7 @@ static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
 	&arm_ccn_pmu_events_attr_group,
 	&arm_ccn_pmu_format_attr_group,
 	&arm_ccn_pmu_cmp_mask_attr_group,
+	&arm_ccn_pmu_cpumask_attr_group,
 	NULL
 };
 
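With the group wired into arm_ccn_pmu_attr_groups, the mask is published as a cpumask file under the PMU's sysfs directory (/sys/bus/event_source/devices/<pmu>/cpumask). Tools such as perf consult these per-PMU cpumask files for uncore-style PMUs and open events only on the listed CPU, so the CPU enforcement added to event_init further down is normally invisible to users.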
@@ -587,7 +628,65 @@ static int arm_ccn_pmu_type_eq(u32 a, u32 b)
 	return 0;
 }
 
-static void arm_ccn_pmu_event_destroy(struct perf_event *event)
+static int arm_ccn_pmu_event_alloc(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	u32 node_xp, type, event_id;
+	struct arm_ccn_component *source;
+	int bit;
+
+	node_xp = CCN_CONFIG_NODE(event->attr.config);
+	type = CCN_CONFIG_TYPE(event->attr.config);
+	event_id = CCN_CONFIG_EVENT(event->attr.config);
+
+	/* Allocate the cycle counter */
+	if (type == CCN_TYPE_CYCLES) {
+		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
+				ccn->dt.pmu_counters_mask))
+			return -EAGAIN;
+
+		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
+		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
+
+		return 0;
+	}
+
+	/* Allocate an event counter */
+	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
+			CCN_NUM_PMU_EVENT_COUNTERS);
+	if (hw->idx < 0) {
+		dev_dbg(ccn->dev, "No more counters available!\n");
+		return -EAGAIN;
+	}
+
+	if (type == CCN_TYPE_XP)
+		source = &ccn->xp[node_xp];
+	else
+		source = &ccn->node[node_xp];
+	ccn->dt.pmu_counters[hw->idx].source = source;
+
+	/* Allocate an event source or a watchpoint */
+	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
+		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
+				CCN_NUM_XP_WATCHPOINTS);
+	else
+		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
+				CCN_NUM_PMU_EVENTS);
+	if (bit < 0) {
+		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
+				node_xp);
+		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+		return -EAGAIN;
+	}
+	hw->config_base = bit;
+
+	ccn->dt.pmu_counters[hw->idx].event = event;
+
+	return 0;
+}
+
+static void arm_ccn_pmu_event_release(struct perf_event *event)
 {
 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 	struct hw_perf_event *hw = &event->hw;
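Note the log level change that accompanies the move: the allocation failure messages become dev_dbg() in arm_ccn_pmu_event_alloc(), where the removed event_init-time code (see the large removal further down) used dev_warn(). Once allocation happens on every ->add(), running out of counters is an expected, recoverable condition during event rotation rather than a configuration error worth warning about.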
@@ -616,15 +715,14 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 	struct arm_ccn *ccn;
 	struct hw_perf_event *hw = &event->hw;
 	u32 node_xp, type, event_id;
-	int valid, bit;
-	struct arm_ccn_component *source;
+	int valid;
 	int i;
+	struct perf_event *sibling;
 
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
 
 	ccn = pmu_to_arm_ccn(event->pmu);
-	event->destroy = arm_ccn_pmu_event_destroy;
 
 	if (hw->sample_period) {
 		dev_warn(ccn->dev, "Sampling not supported!\n");
@@ -642,6 +740,16 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 		dev_warn(ccn->dev, "Can't provide per-task data!\n");
 		return -EOPNOTSUPP;
 	}
+	/*
+	 * Many perf core operations (eg. events rotation) operate on a
+	 * single CPU context. This is obvious for CPU PMUs, where one
+	 * expects the same sets of events being observed on all CPUs,
+	 * but can lead to issues for off-core PMUs, like CCN, where each
+	 * event could be theoretically assigned to a different CPU. To
+	 * mitigate this, we enforce CPU assignment to one, selected
+	 * processor (the one described in the "cpumask" attribute).
+	 */
+	event->cpu = cpumask_first(&ccn->dt.cpu);
 
 	node_xp = CCN_CONFIG_NODE(event->attr.config);
 	type = CCN_CONFIG_TYPE(event->attr.config);
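For context, a minimal userspace sketch (a hypothetical helper, not from the patch) of how an event ends up on the right CPU: the caller opens a CPU-wide event (pid == -1, cpu >= 0, as the per-task check above requires) on the CPU read from the cpumask attribute. Even if a different online CPU were passed, the hunk above rewrites event->cpu to cpumask_first(&ccn->dt.cpu).

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_ccn_event(__u32 pmu_type, __u64 config, int cpu)
	{
		struct perf_event_attr attr = {
			.type = pmu_type,  /* .../event_source/devices/ccn*/type */
			.size = sizeof(attr),
			.config = config,  /* packed as per the format attributes */
		};

		/* pid = -1 plus a valid cpu gives the CPU-wide context */
		return syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
			       -1 /* group_fd */, 0 /* flags */);
	}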
@@ -711,48 +819,20 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 				node_xp, type, port);
 	}
 
-	/* Allocate the cycle counter */
-	if (type == CCN_TYPE_CYCLES) {
-		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
-				ccn->dt.pmu_counters_mask))
-			return -EAGAIN;
-
-		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
-		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
-
-		return 0;
-	}
-
-	/* Allocate an event counter */
-	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
-			CCN_NUM_PMU_EVENT_COUNTERS);
-	if (hw->idx < 0) {
-		dev_warn(ccn->dev, "No more counters available!\n");
-		return -EAGAIN;
-	}
-
-	if (type == CCN_TYPE_XP)
-		source = &ccn->xp[node_xp];
-	else
-		source = &ccn->node[node_xp];
-	ccn->dt.pmu_counters[hw->idx].source = source;
-
-	/* Allocate an event source or a watchpoint */
-	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
-		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
-				CCN_NUM_XP_WATCHPOINTS);
-	else
-		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
-				CCN_NUM_PMU_EVENTS);
-	if (bit < 0) {
-		dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
-				node_xp);
-		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
-		return -EAGAIN;
-	}
-	hw->config_base = bit;
+	/*
+	 * We must NOT create groups containing mixed PMUs, although software
+	 * events are acceptable (for example to create a CCN group
+	 * periodically read when a hrtimer aka cpu-clock leader triggers).
+	 */
+	if (event->group_leader->pmu != event->pmu &&
+			!is_software_event(event->group_leader))
+		return -EINVAL;
 
-	ccn->dt.pmu_counters[hw->idx].event = event;
+	list_for_each_entry(sibling, &event->group_leader->sibling_list,
+			group_entry)
+		if (sibling->pmu != event->pmu &&
+				!is_software_event(sibling))
+			return -EINVAL;
 
 	return 0;
 }
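The two checks follow the usual pattern for system/uncore PMUs: the group leader and every sibling must either belong to this PMU or be a software event. As the comment notes, a software leader (e.g. a hrtimer-based cpu-clock event) remains legal, so a CCN group can still be read periodically off the back of such a leader.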
@@ -835,9 +915,15 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
 			arm_ccn_pmu_read_counter(ccn, hw->idx));
 	hw->state = 0;
 
-	if (!ccn->irq_used)
-		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
-				HRTIMER_MODE_REL);
+	/*
+	 * Pin the timer, so that the overflows are handled by the chosen
+	 * event->cpu (this is the same one as presented in "cpumask"
+	 * attribute).
+	 */
+	if (!ccn->irq)
+		__hrtimer_start_range_ns(&ccn->dt.hrtimer,
+				arm_ccn_pmu_timer_period(), 0,
+				HRTIMER_MODE_REL_PINNED, 0);
 
 	/* Set the DT bus input, engaging the counter */
 	arm_ccn_pmu_xp_dt_config(event, 1);
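Besides the !ccn->irq test, the polling timer is now started pinned: __hrtimer_start_range_ns() with HRTIMER_MODE_REL_PINNED keeps the timer on the CPU that armed it instead of letting the hrtimer core pick another target. Since perf core invokes ->start() on event->cpu, the overflow polling stays on the CPU advertised in "cpumask", as the new comment says.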
@@ -852,7 +938,7 @@ static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 	/* Disable counting, setting the DT bus to pass-through mode */
 	arm_ccn_pmu_xp_dt_config(event, 0);
 
-	if (!ccn->irq_used)
+	if (!ccn->irq)
 		hrtimer_cancel(&ccn->dt.hrtimer);
 
 	/* Let the DT bus drain */
@@ -1014,8 +1100,13 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
 
 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 {
+	int err;
 	struct hw_perf_event *hw = &event->hw;
 
+	err = arm_ccn_pmu_event_alloc(event);
+	if (err)
+		return err;
+
 	arm_ccn_pmu_event_config(event);
 
 	hw->state = PERF_HES_STOPPED;
@@ -1029,6 +1120,8 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
 {
 	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
+
+	arm_ccn_pmu_event_release(event);
 }
 
 static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1079,12 +1172,39 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
 }
 
 
+static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
+		unsigned long action, void *hcpu)
+{
+	struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
+	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+	unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
+	unsigned int target;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+			break;
+		target = cpumask_any_but(cpu_online_mask, cpu);
+		if (target < 0)
+			break;
+		perf_pmu_migrate_context(&dt->pmu, cpu, target);
+		cpumask_set_cpu(target, &dt->cpu);
+		WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+
 static DEFINE_IDA(arm_ccn_pmu_ida);
 
 static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 {
 	int i;
 	char *name;
+	int err;
 
 	/* Initialize DT subsystem */
 	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
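One wrinkle worth flagging in the notifier: target is declared unsigned int, so the target < 0 test can never fire; cpumask_any_but() signals "no suitable CPU" by returning a value >= nr_cpu_ids. The conventional guard would look like this (a sketch, not part of this patch):

	int target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids)	/* no other online CPU */
		break;

In practice another CPU is always online during CPU_DOWN_PREPARE, so the broken check is latent rather than harmful here.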
@@ -1136,20 +1256,58 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	};
 
 	/* No overflow interrupt? Have to use a timer instead. */
-	if (!ccn->irq_used) {
+	if (!ccn->irq) {
 		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
 		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
 				HRTIMER_MODE_REL);
 		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
 	}
 
-	return perf_pmu_register(&ccn->dt.pmu, name, -1);
+	/* Pick one CPU which we will use to collect data from CCN... */
+	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
+
+	/*
+	 * ... and change the selection when it goes offline. Priority is
+	 * picked to have a chance to migrate events before perf is notified.
+	 */
+	ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
+	ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1,
+	err = register_cpu_notifier(&ccn->dt.cpu_nb);
+	if (err)
+		goto error_cpu_notifier;
+
+	/* Also make sure that the overflow interrupt is handled by this CPU */
+	if (ccn->irq) {
+		err = irq_set_affinity(ccn->irq, &ccn->dt.cpu);
+		if (err) {
+			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
+			goto error_set_affinity;
+		}
+	}
+
+	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
+	if (err)
+		goto error_pmu_register;
+
+	return 0;
+
+error_pmu_register:
+error_set_affinity:
+	unregister_cpu_notifier(&ccn->dt.cpu_nb);
+error_cpu_notifier:
+	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+	for (i = 0; i < ccn->num_xps; i++)
+		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
+	writel(0, ccn->dt.base + CCN_DT_PMCR);
+	return err;
 }
 
 static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 {
 	int i;
 
+	irq_set_affinity(ccn->irq, cpu_possible_mask);
+	unregister_cpu_notifier(&ccn->dt.cpu_nb);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
 	writel(0, ccn->dt.base + CCN_DT_PMCR);
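Two small details in the init/cleanup pairing above: the new error path unwinds strictly in reverse order of setup (notifier, then the IDA entry, then the XP and DT control registers), and arm_ccn_pmu_cleanup() gains the matching runtime teardown, relaxing the IRQ affinity back to cpu_possible_mask before unregistering the notifier. Also note the stray comma terminating the cpu_nb.priority assignment; it is legal C (the comma operator chains it with the following err = assignment) but a semicolon is clearly what was meant.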
@@ -1285,6 +1443,7 @@ static int arm_ccn_probe(struct platform_device *pdev)
 {
 	struct arm_ccn *ccn;
 	struct resource *res;
+	unsigned int irq;
 	int err;
 
 	ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
@@ -1309,6 +1468,7 @@ static int arm_ccn_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res)
 		return -EINVAL;
+	irq = res->start;
 
 	/* Check if we can use the interrupt */
 	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
@@ -1318,13 +1478,12 @@ static int arm_ccn_probe(struct platform_device *pdev)
 		/* Can set 'disable' bits, so can acknowledge interrupts */
 		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
 		       ccn->base + CCN_MN_ERRINT_STATUS);
-		err = devm_request_irq(ccn->dev, res->start,
-				arm_ccn_irq_handler, 0, dev_name(ccn->dev),
-				ccn);
+		err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
+				dev_name(ccn->dev), ccn);
 		if (err)
 			return err;
 
-		ccn->irq_used = 1;
+		ccn->irq = irq;
 	}
 
 