author     Ashok Kumar <ashoks@broadcom.com>   2016-04-21 08:58:43 -0400
committer  Will Deacon <will.deacon@arm.com>   2016-04-25 09:11:06 -0400
commit     bf2d4782e7500b6e3e6f606b17b596751bc14013
tree       a9de4017c5883c337dd5c4675465cbcbfc24a6cd
parent     0893f74545e615eda796c8d443cafee1959f3a73
arm64/perf: Access pmu register using <read/write>_sys_reg
Changed the PMU register accesses to use the read_sysreg()/write_sysreg()
helpers from sysreg.h instead of open-coded mrs/msr inline assembly.
Signed-off-by: Ashok Kumar <ashoks@broadcom.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
 arch/arm64/kernel/perf_event.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)
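For context before the diff: the sysreg.h helpers this patch switches to wrap the same mrs/msr instructions behind macros that stringify the register name. A minimal sketch of their shape, assuming the arch/arm64/include/asm/sysreg.h definitions of this era (the exact casts and asm constraints may differ; this is not verbatim kernel source):

/* Illustrative sketch of the <asm/sysreg.h> accessors, not verbatim.
 * __stringify() comes from <linux/stringify.h>.
 */
#define read_sysreg(r) ({						\
	u64 __val;							\
	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
	__val;								\
})

#define write_sysreg(v, r) do {						\
	u64 __val = (u64)(v);						\
	asm volatile("msr " __stringify(r) ", %0" : : "r" (__val));	\
} while (0)

With this shape, read_sysreg(pmcr_el0) and write_sysreg(val, pmcr_el0) compile down to the same single mrs/msr instructions as the open-coded assembly they replace below.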
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index d5a02bc75667..946ce4badb8e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -21,6 +21,7 @@
 
 #include <asm/irq_regs.h>
 #include <asm/perf_event.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 #include <linux/of.h>
@@ -476,16 +477,14 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 
 static inline u32 armv8pmu_pmcr_read(void)
 {
-	u32 val;
-	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
-	return val;
+	return read_sysreg(pmcr_el0);
 }
 
 static inline void armv8pmu_pmcr_write(u32 val)
 {
 	val &= ARMV8_PMU_PMCR_MASK;
 	isb();
-	asm volatile("msr pmcr_el0, %0" :: "r" (val));
+	write_sysreg(val, pmcr_el0);
 }
 
 static inline int armv8pmu_has_overflowed(u32 pmovsr)
@@ -507,7 +506,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 static inline int armv8pmu_select_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
+	write_sysreg(counter, pmselr_el0);
 	isb();
 
 	return idx;
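Worth noting about the hunk above (a pattern the conversion preserves): PMSELR_EL0 selects which event counter the PMXEVCNTR_EL0/PMXEVTYPER_EL0 registers alias to, and the isb() ensures the selection has taken effect before a subsequent counter access. A minimal sketch of a selected-counter read built from the same helpers (pmu_read_selected_counter is a hypothetical name, not part of this patch):

/* Hypothetical helper, shown only to illustrate the select-then-access pattern. */
static inline u64 pmu_read_selected_counter(u32 counter)
{
	write_sysreg(counter, pmselr_el0);	/* select the event counter */
	isb();					/* synchronize the selection */
	return read_sysreg(pmxevcntr_el0);	/* read the selected counter */
}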
@@ -524,9 +523,9 @@ static inline u32 armv8pmu_read_counter(struct perf_event *event)
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
 	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
+		value = read_sysreg(pmccntr_el0);
 	else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
+		value = read_sysreg(pmxevcntr_el0);
 
 	return value;
 }
@@ -548,47 +547,47 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 		 */
 		u64 value64 = 0xffffffff00000000ULL | value;
 
-		asm volatile("msr pmccntr_el0, %0" :: "r" (value64));
+		write_sysreg(value64, pmccntr_el0);
 	} else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
+		write_sysreg(value, pmxevcntr_el0);
 }
 
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
 	if (armv8pmu_select_counter(idx) == idx) {
 		val &= ARMV8_PMU_EVTYPE_MASK;
-		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+		write_sysreg(val, pmxevtyper_el0);
 	}
 }
 
 static inline int armv8pmu_enable_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmcntenset_el0);
 	return idx;
 }
 
 static inline int armv8pmu_disable_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmcntenclr_el0);
 	return idx;
 }
 
 static inline int armv8pmu_enable_intens(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmintenset_el1);
 	return idx;
 }
 
 static inline int armv8pmu_disable_intens(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmintenclr_el1);
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmovsclr_el0);
 	isb();
 
 	return idx;
@@ -599,11 +598,11 @@ static inline u32 armv8pmu_getreset_flags(void)
 	u32 value;
 
 	/* Read */
-	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
+	value = read_sysreg(pmovsclr_el0);
 
 	/* Write to clear flags */
 	value &= ARMV8_PMU_OVSR_MASK;
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
+	write_sysreg(value, pmovsclr_el0);
 
 	return value;
 }
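As a worked example of why the conversion is behaviour-preserving: under the macro sketch shown earlier, the converted armv8pmu_enable_counter() body expands to essentially the assembly it replaced (hypothetical expansion for illustration; the real preprocessor output depends on the exact macro definitions in the tree):

/* write_sysreg(BIT(counter), pmcntenset_el0) expands to roughly: */
do {
	u64 __val = (u64)(BIT(counter));
	asm volatile("msr pmcntenset_el0, %0" : : "r" (__val));
} while (0);

The only difference from the removed asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))) is the explicit u64 temporary, which does not change the value written to these registers.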