aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/perf
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-10-03 11:58:35 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-03 11:58:35 -0400
commit7af8a0f8088831428051976cb06cc1e450f8bab5 (patch)
tree45289becbd10b1e421dc4b5a99ce6d7d90964c28 /drivers/perf
parentc8d2bc9bc39ebea8437fd974fdbc21847bb897a3 (diff)
parentdb68f3e7594aca77632d56c449bd36c6c931d59a (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon: "It's a bit all over the place this time with no "killer feature" to speak of. Support for mismatched cache line sizes should help people seeing whacky JIT failures on some SoCs, and the big.LITTLE perf updates have been a long time coming, but a lot of the changes here are cleanups. We stray outside arch/arm64 in a few areas: the arch/arm/ arch_timer workaround is acked by Russell, the DT/OF bits are acked by Rob, the arch_timer clocksource changes acked by Marc, CPU hotplug by tglx and jump_label by Peter (all CC'd). Summary: - Support for execute-only page permissions - Support for hibernate and DEBUG_PAGEALLOC - Support for heterogeneous systems with mismatched cache line sizes - Errata workarounds (A53 843419 update and QorIQ A-008585 timer bug) - arm64 PMU perf updates, including cpumasks for heterogeneous systems - Set UTS_MACHINE for building rpm packages - Yet another head.S tidy-up - Some cleanups and refactoring, particularly in the NUMA code - Lots of random, non-critical fixes across the board" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (100 commits) arm64: tlbflush.h: add __tlbi() macro arm64: Kconfig: remove SMP dependence for NUMA arm64: Kconfig: select OF/ACPI_NUMA under NUMA config arm64: fix dump_backtrace/unwind_frame with NULL tsk arm/arm64: arch_timer: Use archdata to indicate vdso suitability arm64: arch_timer: Work around QorIQ Erratum A-008585 arm64: arch_timer: Add device tree binding for A-008585 erratum arm64: Correctly bounds check virt_addr_valid arm64: migrate exception table users off module.h and onto extable.h arm64: pmu: Hoist pmu platform device name arm64: pmu: Probe default hw/cache counters arm64: pmu: add fallback probe table MAINTAINERS: Update ARM PMU PROFILING AND DEBUGGING entry arm64: Improve kprobes test for atomic sequence arm64/kvm: use alternative auto-nop arm64: use alternative auto-nop arm64: alternative: add auto-nop infrastructure 
arm64: lse: convert lse alternatives NOP padding to use __nops arm64: barriers: introduce nops and __nops macros for NOP sequences arm64: sysreg: replace open-coded mrs_s/msr_s with {read,write}_sysreg_s ...
Diffstat (limited to 'drivers/perf')
-rw-r--r--drivers/perf/arm_pmu.c34
1 file changed, 28 insertions, 6 deletions
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f5e1008a223d..30370817bf13 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -534,6 +534,24 @@ static int armpmu_filter_match(struct perf_event *event)
534 return cpumask_test_cpu(cpu, &armpmu->supported_cpus); 534 return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
535} 535}
536 536
537static ssize_t armpmu_cpumask_show(struct device *dev,
538 struct device_attribute *attr, char *buf)
539{
540 struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
541 return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
542}
543
544static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
545
546static struct attribute *armpmu_common_attrs[] = {
547 &dev_attr_cpus.attr,
548 NULL,
549};
550
551static struct attribute_group armpmu_common_attr_group = {
552 .attrs = armpmu_common_attrs,
553};
554
537static void armpmu_init(struct arm_pmu *armpmu) 555static void armpmu_init(struct arm_pmu *armpmu)
538{ 556{
539 atomic_set(&armpmu->active_events, 0); 557 atomic_set(&armpmu->active_events, 0);
@@ -549,7 +567,10 @@ static void armpmu_init(struct arm_pmu *armpmu)
549 .stop = armpmu_stop, 567 .stop = armpmu_stop,
550 .read = armpmu_read, 568 .read = armpmu_read,
551 .filter_match = armpmu_filter_match, 569 .filter_match = armpmu_filter_match,
570 .attr_groups = armpmu->attr_groups,
552 }; 571 };
572 armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
573 &armpmu_common_attr_group;
553} 574}
554 575
555/* Set at runtime when we know what CPU type we are. */ 576/* Set at runtime when we know what CPU type we are. */
@@ -602,7 +623,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
602 irqs = min(pmu_device->num_resources, num_possible_cpus()); 623 irqs = min(pmu_device->num_resources, num_possible_cpus());
603 624
604 irq = platform_get_irq(pmu_device, 0); 625 irq = platform_get_irq(pmu_device, 0);
605 if (irq >= 0 && irq_is_percpu(irq)) { 626 if (irq > 0 && irq_is_percpu(irq)) {
606 on_each_cpu_mask(&cpu_pmu->supported_cpus, 627 on_each_cpu_mask(&cpu_pmu->supported_cpus,
607 cpu_pmu_disable_percpu_irq, &irq, 1); 628 cpu_pmu_disable_percpu_irq, &irq, 1);
608 free_percpu_irq(irq, &hw_events->percpu_pmu); 629 free_percpu_irq(irq, &hw_events->percpu_pmu);
@@ -616,7 +637,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
616 if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) 637 if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
617 continue; 638 continue;
618 irq = platform_get_irq(pmu_device, i); 639 irq = platform_get_irq(pmu_device, i);
619 if (irq >= 0) 640 if (irq > 0)
620 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); 641 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
621 } 642 }
622 } 643 }
@@ -638,7 +659,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
638 } 659 }
639 660
640 irq = platform_get_irq(pmu_device, 0); 661 irq = platform_get_irq(pmu_device, 0);
641 if (irq >= 0 && irq_is_percpu(irq)) { 662 if (irq > 0 && irq_is_percpu(irq)) {
642 err = request_percpu_irq(irq, handler, "arm-pmu", 663 err = request_percpu_irq(irq, handler, "arm-pmu",
643 &hw_events->percpu_pmu); 664 &hw_events->percpu_pmu);
644 if (err) { 665 if (err) {
@@ -919,7 +940,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
919 940
920 /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ 941 /* Check the IRQ type and prohibit a mix of PPIs and SPIs */
921 irq = platform_get_irq(pdev, i); 942 irq = platform_get_irq(pdev, i);
922 if (irq >= 0) { 943 if (irq > 0) {
923 bool spi = !irq_is_percpu(irq); 944 bool spi = !irq_is_percpu(irq);
924 945
925 if (i > 0 && spi != using_spi) { 946 if (i > 0 && spi != using_spi) {
@@ -970,7 +991,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
970 if (cpumask_weight(&pmu->supported_cpus) == 0) { 991 if (cpumask_weight(&pmu->supported_cpus) == 0) {
971 int irq = platform_get_irq(pdev, 0); 992 int irq = platform_get_irq(pdev, 0);
972 993
973 if (irq >= 0 && irq_is_percpu(irq)) { 994 if (irq > 0 && irq_is_percpu(irq)) {
974 /* If using PPIs, check the affinity of the partition */ 995 /* If using PPIs, check the affinity of the partition */
975 int ret; 996 int ret;
976 997
@@ -1029,7 +1050,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
1029 ret = of_pmu_irq_cfg(pmu); 1050 ret = of_pmu_irq_cfg(pmu);
1030 if (!ret) 1051 if (!ret)
1031 ret = init_fn(pmu); 1052 ret = init_fn(pmu);
1032 } else { 1053 } else if (probe_table) {
1033 cpumask_setall(&pmu->supported_cpus); 1054 cpumask_setall(&pmu->supported_cpus);
1034 ret = probe_current_pmu(pmu, probe_table); 1055 ret = probe_current_pmu(pmu, probe_table);
1035 } 1056 }
@@ -1039,6 +1060,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
1039 goto out_free; 1060 goto out_free;
1040 } 1061 }
1041 1062
1063
1042 ret = cpu_pmu_init(pmu); 1064 ret = cpu_pmu_init(pmu);
1043 if (ret) 1065 if (ret)
1044 goto out_free; 1066 goto out_free;