author		Vineet Gupta <vgupta@synopsys.com>	2013-02-06 04:39:13 -0500
committer	Vineet Gupta <vgupta@synopsys.com>	2013-02-15 12:46:20 -0500
commit		1e266629933bb3e40ac7db128f3b661f5bab56c1 (patch)
tree		9167653a32f9402e5941e5afc24e3eb3019425c5 /arch/arc
parent		d626f547dd0457ab36f6151673fcc78fc3c63eaa (diff)
ARC: 64bit RTSC timestamp hardware issue
The 64bit RTSC is not reliable, causing spurious "jumps" in the higher word and making Linux timekeeping go bonkers. So as of now, just use the lower 32bit timestamp.

A cleaner approach would have been removing RTSC support altogether, as the 32bit RTSC is equivalent to the old TIMER1 based solution; but some customers can use the 32bit RTSC in SMP sync fashion (vs. TIMER1 which, being in-core, can't easily be used that way).

A fallout of this is that sched_clock()'s hardware assisted version needs to go away, since it can't use a 32bit wrapping counter - instead we fall back to the generic "weak" jiffies based version.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
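For illustration only (not part of the patch), here is a minimal user-space sketch of why a wrapping 32bit counter is fine for a clocksource but not for sched_clock(): the clocksource core computes deltas modulo CLOCKSOURCE_MASK(32), while a naive cycles-to-ns sched_clock() jumps backwards at the 2^32 boundary. The 12.5 ns/cycle figure assumes the 80MHz core mentioned in the removed comment below.

#include <stdint.h>
#include <stdio.h>

/* assumed 80 MHz core => 12.5 ns/cycle, same fixed point as the removed cycles2ns() */
static uint64_t cyc2ns(uint64_t cyc) { return (cyc * 12800) >> 10; }

int main(void)
{
	uint32_t before = 0xffffffe0;		/* counter just before the 2^32 wrap */
	uint32_t after  = before + 0x40;	/* 64 cycles later; wraps to 0x20 */

	/* naive sched_clock: ns goes backwards across the wrap */
	printf("naive: %llu -> %llu ns\n",
	       (unsigned long long)cyc2ns(before),
	       (unsigned long long)cyc2ns(after));

	/* clocksource-style delta: (now - last) masked to 32 bits stays correct */
	uint32_t delta = (after - before) & 0xffffffffu;
	printf("masked delta: %llu ns\n", (unsigned long long)cyc2ns(delta));
	return 0;
}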
Diffstat (limited to 'arch/arc')
-rw-r--r--	arch/arc/kernel/time.c	38
1 file changed, 2 insertions(+), 36 deletions(-)
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0ce0e6f76eb0..f13f72807aa5 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -76,7 +76,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 	__asm__ __volatile(
 	"	.extCoreRegister tsch, 58, r, cannot_shortcut	\n"
 	"	rtsc %0, 0	\n"
-	"	mov  %1, tsch	\n"	/* TSCH is extn core reg 58 */
+	"	mov  %1, 0	\n"
 	: "=r" (stamp.low), "=r" (stamp.high));

 	arch_local_irq_restore(flags);
@@ -88,7 +88,7 @@ static struct clocksource arc_counter = {
 	.name   = "ARC RTSC",
 	.rating = 300,
 	.read   = arc_counter_read,
-	.mask   = CLOCKSOURCE_MASK(64),
+	.mask   = CLOCKSOURCE_MASK(32),
 	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };

@@ -263,37 +263,3 @@ void __init time_init(void)
 	if (machine_desc->init_time)
 		machine_desc->init_time();
 }
-
-#ifdef CONFIG_ARC_HAS_RTSC
-/*
- * sched_clock math assist
- *	ns = cycles * (ns_per_sec / cpu_freq_hz)
- *	ns = cycles * (10^6 / cpu_freq_khz)
- *	ns = cycles * (10^6 * 2^SF / cpu_freq_khz) / 2^SF
- *	ns = cycles * cyc2ns_scale >> SF
- */
-#define CYC2NS_SF	10	/* 2^10, carefully chosen */
-#define CYC2NS_SCALE	((1000000 << CYC2NS_SF) / (arc_get_core_freq() / 1000))
-
-static unsigned long long cycles2ns(unsigned long long cyc)
-{
-	return (cyc * CYC2NS_SCALE) >> CYC2NS_SF;
-}
-
-/*
- * Scheduler clock - a monotonically increasing clock in nanosec units.
- * Its return value must NOT wrap around.
- *
- * - Since 32bit TIMER1 will overflow almost immediately (53sec @ 80MHz), it
- *   can't be used directly.
- * - Using getrawmonotonic (TIMER1 based, but with state for last + current
- *   snapshots) is no good either, because of seqlock deadlock possibilities.
- * - So we only do this with the native 64bit timer; otherwise we fall back
- *   to the generic jiffies based version - which, despite not being fine
- *   grained, guarantees the monotonically increasing semantics.
- */
-unsigned long long sched_clock(void)
-{
-	return cycles2ns(arc_counter_read(NULL));
-}
-#endif	/* CONFIG_ARC_HAS_RTSC */
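As a sanity check on the removed cyc2ns fixed-point math, here is a small stand-alone sketch. It assumes the 80MHz core frequency from the "53sec @ 80MHz" comment above, since arc_get_core_freq() is board specific: the scale works out to (10^6 << 10) / 80000 = 12800, i.e. 12.5 ns/cycle with 10 fractional bits, and 2^32 cycles / 80MHz is indeed about 53.7 seconds.

#include <assert.h>
#include <stdio.h>

#define CYC2NS_SF	10	/* 2^10, as in the removed code */
#define CORE_FREQ_HZ	80000000UL	/* assumed 80 MHz, per the comment above */
#define CYC2NS_SCALE	((1000000UL << CYC2NS_SF) / (CORE_FREQ_HZ / 1000))

int main(void)
{
	/* 12800 / 2^10 = 12.5 ns per cycle */
	assert(CYC2NS_SCALE == 12800);

	/* 80 cycles at 80 MHz must be exactly 1000 ns */
	unsigned long long ns = (80ULL * CYC2NS_SCALE) >> CYC2NS_SF;
	assert(ns == 1000);

	printf("scale = %lu, 80 cycles = %llu ns\n",
	       (unsigned long)CYC2NS_SCALE, ns);
	return 0;
}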