author     Marc Zyngier <marc.zyngier@arm.com>  2012-01-16 06:44:12 -0500
committer  Marc Zyngier <marc.zyngier@arm.com>  2012-02-03 06:34:25 -0500
commit     30c9c5b157b70d0a2f1fd0f37b936e63bade0d58 (patch)
tree       faec1ec3ca6d0bd3d06c28cbc341f1ab3ddfa0d6  /arch/arm/mach-davinci/time.c
parent     5d0ef6ae63092d9814bb5c35a9bf6fc1caf6ccfe (diff)
ARM: davinci: convert to common sched_clock() implementation
Davinci has its own sched_clock() implementation, which gets in the
way of a single zImage. Moving to the common sched_clock framework
makes the code slightly cleaner.

Acked-by: Sekhar Nori <nsekhar@ti.com>
Cc: Kevin Hilman <khilman@ti.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
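In practice the conversion reduces to the small pattern below: the platform
exposes its free-running 32-bit timer through a read callback and registers
it once with the common ARM sched_clock code, which then does the
cycles-to-nanoseconds scaling that the old hand-rolled sched_clock() carried
itself. This is only a minimal sketch of that pattern, lifted from the hunks
below; it assumes the DaVinci helpers timer32_read(), timers[TID_CLOCKSOURCE]
and davinci_clock_tick_rate from the rest of time.c, and the
davinci_sched_clock_init() wrapper is a hypothetical name used here only to
frame the call, which in the actual patch sits inside davinci_timer_init().

#include <linux/compiler.h>	/* notrace */
#include <linux/init.h>		/* __init */
#include <linux/types.h>	/* u32 */
#include <asm/sched_clock.h>	/* setup_sched_clock() */

/*
 * Return the raw 32-bit count of the clocksource timer; the common
 * sched_clock code extends it to 64 bits and scales it to nanoseconds.
 */
static u32 notrace davinci_read_sched_clock(void)
{
	return timer32_read(&timers[TID_CLOCKSOURCE]);
}

/*
 * Hypothetical wrapper for illustration: in the patch this call is made
 * from davinci_timer_init(), right after the clocksource is registered.
 */
static void __init davinci_sched_clock_init(void)
{
	setup_sched_clock(davinci_read_sched_clock, 32,
			  davinci_clock_tick_rate);
}

The "32" and davinci_clock_tick_rate arguments tell the core how wide the
counter is and how fast it ticks, so the core can pick a suitable mult/shift
pair instead of the driver converting cycles to nanoseconds by hand.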
Diffstat (limited to 'arch/arm/mach-davinci/time.c')
-rw-r--r--  arch/arm/mach-davinci/time.c  |  24
 1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index f2afb2de5494..75da315b6587 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -19,6 +19,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 
+#include <asm/sched_clock.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
@@ -274,19 +275,9 @@ static cycle_t read_cycles(struct clocksource *cs)
 	return (cycles_t)timer32_read(t);
 }
 
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t read_dummy(struct clocksource *cs)
-{
-	return 0;
-}
-
-
 static struct clocksource clocksource_davinci = {
 	.rating = 300,
-	.read = read_dummy,
+	.read = read_cycles,
 	.mask = CLOCKSOURCE_MASK(32),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -294,12 +285,9 @@ static struct clocksource clocksource_davinci = {
 /*
  * Overwrite weak default sched_clock with something more precise
  */
-unsigned long long notrace sched_clock(void)
+static u32 notrace davinci_read_sched_clock(void)
 {
-	const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci);
-
-	return clocksource_cyc2ns(cyc, clocksource_davinci.mult,
-				  clocksource_davinci.shift);
+	return timer32_read(&timers[TID_CLOCKSOURCE]);
 }
 
 /*
@@ -399,12 +387,14 @@ static void __init davinci_timer_init(void)
 	davinci_clock_tick_rate = clk_get_rate(timer_clk);
 
 	/* setup clocksource */
-	clocksource_davinci.read = read_cycles;
 	clocksource_davinci.name = id_to_name[clocksource_id];
 	if (clocksource_register_hz(&clocksource_davinci,
 				    davinci_clock_tick_rate))
 		printk(err, clocksource_davinci.name);
 
+	setup_sched_clock(davinci_read_sched_clock, 32,
+			  davinci_clock_tick_rate);
+
 	/* setup clockevent */
 	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
 	clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,