about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2019-04-08 11:49:07 -0400
committerWill Deacon <will.deacon@arm.com>2019-04-30 11:12:54 -0400
commit0ea415390cd345b7d09e8c9ebd4b68adfe873043 (patch)
tree42ee44177c52347a95d17106ce1d89bc87bad7ac
parenta862fc2254bdbcee3b5da4f730984e5d8393a2f1 (diff)
clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters
Instead of always going via arch_counter_get_cntvct_stable to access the counter workaround, let's have arch_timer_read_counter point to the right method. For that, we need to track whether any CPU in the system has a workaround for the counter. This is done by having an atomic variable tracking this.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm/include/asm/arch_timer.h14
-rw-r--r--arch/arm64/include/asm/arch_timer.h16
-rw-r--r--drivers/clocksource/arm_arch_timer.c48
3 files changed, 70 insertions, 8 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 3f0a0191f763..4b66ecd6be99 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -83,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void)
83 return val; 83 return val;
84} 84}
85 85
86static inline u64 arch_counter_get_cntpct(void) 86static inline u64 __arch_counter_get_cntpct(void)
87{ 87{
88 u64 cval; 88 u64 cval;
89 89
@@ -92,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void)
92 return cval; 92 return cval;
93} 93}
94 94
95static inline u64 arch_counter_get_cntvct(void) 95static inline u64 __arch_counter_get_cntpct_stable(void)
96{
97 return __arch_counter_get_cntpct();
98}
99
100static inline u64 __arch_counter_get_cntvct(void)
96{ 101{
97 u64 cval; 102 u64 cval;
98 103
@@ -101,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void)
101 return cval; 106 return cval;
102} 107}
103 108
109static inline u64 __arch_counter_get_cntvct_stable(void)
110{
111 return __arch_counter_get_cntvct();
112}
113
104static inline u32 arch_timer_get_cntkctl(void) 114static inline u32 arch_timer_get_cntkctl(void)
105{ 115{
106 u32 cntkctl; 116 u32 cntkctl;
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 5502ea049b63..48b2100f4aaa 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -174,18 +174,30 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
174 isb(); 174 isb();
175} 175}
176 176
177static inline u64 arch_counter_get_cntpct(void) 177static inline u64 __arch_counter_get_cntpct_stable(void)
178{ 178{
179 isb(); 179 isb();
180 return arch_timer_reg_read_stable(cntpct_el0); 180 return arch_timer_reg_read_stable(cntpct_el0);
181} 181}
182 182
183static inline u64 arch_counter_get_cntvct(void) 183static inline u64 __arch_counter_get_cntpct(void)
184{
185 isb();
186 return read_sysreg(cntpct_el0);
187}
188
189static inline u64 __arch_counter_get_cntvct_stable(void)
184{ 190{
185 isb(); 191 isb();
186 return arch_timer_reg_read_stable(cntvct_el0); 192 return arch_timer_reg_read_stable(cntvct_el0);
187} 193}
188 194
195static inline u64 __arch_counter_get_cntvct(void)
196{
197 isb();
198 return read_sysreg(cntvct_el0);
199}
200
189static inline int arch_timer_arch_init(void) 201static inline int arch_timer_arch_init(void)
190{ 202{
191 return 0; 203 return 0;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 8f22976247c0..27acc9eb0f7c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -152,6 +152,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
152 return val; 152 return val;
153} 153}
154 154
155static u64 arch_counter_get_cntpct_stable(void)
156{
157 return __arch_counter_get_cntpct_stable();
158}
159
160static u64 arch_counter_get_cntpct(void)
161{
162 return __arch_counter_get_cntpct();
163}
164
165static u64 arch_counter_get_cntvct_stable(void)
166{
167 return __arch_counter_get_cntvct_stable();
168}
169
170static u64 arch_counter_get_cntvct(void)
171{
172 return __arch_counter_get_cntvct();
173}
174
155/* 175/*
156 * Default to cp15 based access because arm64 uses this function for 176 * Default to cp15 based access because arm64 uses this function for
157 * sched_clock() before DT is probed and the cp15 method is guaranteed 177 * sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -365,6 +385,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
365DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); 385DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
366EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); 386EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
367 387
388static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
368 389
369static void erratum_set_next_event_tval_generic(const int access, unsigned long evt, 390static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
370 struct clock_event_device *clk) 391 struct clock_event_device *clk)
@@ -535,6 +556,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
535 per_cpu(timer_unstable_counter_workaround, i) = wa; 556 per_cpu(timer_unstable_counter_workaround, i) = wa;
536 } 557 }
537 558
559 if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
560 atomic_set(&timer_unstable_counter_workaround_in_use, 1);
561
538 /* 562 /*
539 * Don't use the vdso fastpath if errata require using the 563 * Don't use the vdso fastpath if errata require using the
540 * out-of-line counter accessor. We may change our mind pretty 564 * out-of-line counter accessor. We may change our mind pretty
@@ -591,9 +615,15 @@ static bool arch_timer_this_cpu_has_cntvct_wa(void)
591{ 615{
592 return has_erratum_handler(read_cntvct_el0); 616 return has_erratum_handler(read_cntvct_el0);
593} 617}
618
619static bool arch_timer_counter_has_wa(void)
620{
621 return atomic_read(&timer_unstable_counter_workaround_in_use);
622}
594#else 623#else
595#define arch_timer_check_ool_workaround(t,a) do { } while(0) 624#define arch_timer_check_ool_workaround(t,a) do { } while(0)
596#define arch_timer_this_cpu_has_cntvct_wa() ({false;}) 625#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
626#define arch_timer_counter_has_wa() ({false;})
597#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ 627#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
598 628
599static __always_inline irqreturn_t timer_handler(const int access, 629static __always_inline irqreturn_t timer_handler(const int access,
@@ -942,12 +972,22 @@ static void __init arch_counter_register(unsigned type)
942 972
943 /* Register the CP15 based counter if we have one */ 973 /* Register the CP15 based counter if we have one */
944 if (type & ARCH_TIMER_TYPE_CP15) { 974 if (type & ARCH_TIMER_TYPE_CP15) {
975 u64 (*rd)(void);
976
945 if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || 977 if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
946 arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) 978 arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
947 arch_timer_read_counter = arch_counter_get_cntvct; 979 if (arch_timer_counter_has_wa())
948 else 980 rd = arch_counter_get_cntvct_stable;
949 arch_timer_read_counter = arch_counter_get_cntpct; 981 else
982 rd = arch_counter_get_cntvct;
983 } else {
984 if (arch_timer_counter_has_wa())
985 rd = arch_counter_get_cntpct_stable;
986 else
987 rd = arch_counter_get_cntpct;
988 }
950 989
990 arch_timer_read_counter = rd;
951 clocksource_counter.archdata.vdso_direct = vdso_default; 991 clocksource_counter.archdata.vdso_direct = vdso_default;
952 } else { 992 } else {
953 arch_timer_read_counter = arch_counter_get_cntvct_mem; 993 arch_timer_read_counter = arch_counter_get_cntvct_mem;