Diffstat (limited to 'arch/avr32/kernel/time.c')
-rw-r--r--  arch/avr32/kernel/time.c  209
1 file changed, 80 insertions(+), 129 deletions(-)
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index bf2f762e6a47..00a9862380ff 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -1,16 +1,12 @@
 /*
  * Copyright (C) 2004-2007 Atmel Corporation
  *
- * Based on MIPS implementation arch/mips/kernel/time.c
- * Copyright 2001 MontaVista Software Inc.
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
 #include <linux/clk.h>
-#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <linux/time.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -27,13 +23,10 @@
 #include <asm/io.h>
 #include <asm/sections.h>
 
-/* how many counter cycles in a jiffy? */
-static u32 cycles_per_jiffy;
+#include <asm/arch/pm.h>
 
-/* the count value for the next timer interrupt */
-static u32 expirelo;
 
-cycle_t __weak read_cycle_count(void)
+static cycle_t read_cycle_count(void)
 {
         return (cycle_t)sysreg_read(COUNT);
 }
@@ -42,10 +35,11 @@ cycle_t __weak read_cycle_count(void)
  * The architectural cycle count registers are a fine clocksource unless
  * the system idle loop use sleep states like "idle": the CPU cycles
  * measured by COUNT (and COMPARE) don't happen during sleep states.
+ * Their duration also changes if cpufreq changes the CPU clock rate.
  * So we rate the clocksource using COUNT as very low quality.
  */
-struct clocksource __weak clocksource_avr32 = {
-        .name           = "avr32",
+static struct clocksource counter = {
+        .name           = "avr32_counter",
         .rating         = 50,
         .read           = read_cycle_count,
         .mask           = CLOCKSOURCE_MASK(32),
@@ -53,152 +47,109 @@ struct clocksource __weak clocksource_avr32 = {
         .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-irqreturn_t __weak timer_interrupt(int irq, void *dev_id);
-
-struct irqaction timer_irqaction = {
-        .handler        = timer_interrupt,
-        .flags          = IRQF_DISABLED,
-        .name           = "timer",
-};
-
-static void avr32_timer_ack(void)
-{
-        u32 count;
-
-        /* Ack this timer interrupt and set the next one */
-        expirelo += cycles_per_jiffy;
-        /* setting COMPARE to 0 stops the COUNT-COMPARE */
-        if (expirelo == 0) {
-                sysreg_write(COMPARE, expirelo + 1);
-        } else {
-                sysreg_write(COMPARE, expirelo);
-        }
-
-        /* Check to see if we have missed any timer interrupts */
-        count = sysreg_read(COUNT);
-        if ((count - expirelo) < 0x7fffffff) {
-                expirelo = count + cycles_per_jiffy;
-                sysreg_write(COMPARE, expirelo);
-        }
-}
-
-int __weak avr32_hpt_init(void)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-        int ret;
-        unsigned long mult, shift, count_hz;
-
-        count_hz = clk_get_rate(boot_cpu_data.clk);
-        shift = clocksource_avr32.shift;
-        mult = clocksource_hz2mult(count_hz, shift);
-        clocksource_avr32.mult = mult;
+        struct clock_event_device *evdev = dev_id;
 
-        {
-                u64 tmp;
+        /*
+         * Disable the interrupt until the clockevent subsystem
+         * reprograms it.
+         */
+        sysreg_write(COMPARE, 0);
 
-                tmp = TICK_NSEC;
-                tmp <<= shift;
-                tmp += mult / 2;
-                do_div(tmp, mult);
+        evdev->event_handler(evdev);
+        return IRQ_HANDLED;
+}
 
-                cycles_per_jiffy = tmp;
-        }
+static struct irqaction timer_irqaction = {
+        .handler        = timer_interrupt,
+        .flags          = IRQF_TIMER | IRQF_DISABLED,
+        .name           = "avr32_comparator",
+};
 
-        ret = setup_irq(0, &timer_irqaction);
-        if (ret) {
-                pr_debug("timer: could not request IRQ 0: %d\n", ret);
-                return -ENODEV;
-        }
+static int comparator_next_event(unsigned long delta,
+                struct clock_event_device *evdev)
+{
+        unsigned long flags;
 
-        printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, "
-                        "%lu.%03lu MHz\n",
-                        ((count_hz + 500) / 1000) / 1000,
-                        ((count_hz + 500) / 1000) % 1000);
+        raw_local_irq_save(flags);
 
-        return 0;
-}
+        /* The time to read COUNT then update COMPARE must be less
+         * than the min_delta_ns value for this clockevent source.
+         */
+        sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1);
 
-/*
- * Taken from MIPS c0_hpt_timer_init().
- *
- * The reason COUNT is written twice is probably to make sure we don't get any
- * timer interrupts while we are messing with the counter.
- */
-int __weak avr32_hpt_start(void)
-{
-        u32 count = sysreg_read(COUNT);
-        expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
-        sysreg_write(COUNT, expirelo - cycles_per_jiffy);
-        sysreg_write(COMPARE, expirelo);
-        sysreg_write(COUNT, count);
+        raw_local_irq_restore(flags);
 
         return 0;
 }
 
-/*
- * local_timer_interrupt() does profiling and process accounting on a
- * per-CPU basis.
- *
- * In UP mode, it is invoked from the (global) timer_interrupt.
- */
-void local_timer_interrupt(int irq, void *dev_id)
+static void comparator_mode(enum clock_event_mode mode,
+                struct clock_event_device *evdev)
 {
-        if (current->pid)
-                profile_tick(CPU_PROFILING);
-        update_process_times(user_mode(get_irq_regs()));
+        switch (mode) {
+        case CLOCK_EVT_MODE_ONESHOT:
+                pr_debug("%s: start\n", evdev->name);
+                /* FALLTHROUGH */
+        case CLOCK_EVT_MODE_RESUME:
+                cpu_disable_idle_sleep();
+                break;
+        case CLOCK_EVT_MODE_UNUSED:
+        case CLOCK_EVT_MODE_SHUTDOWN:
+                sysreg_write(COMPARE, 0);
+                pr_debug("%s: stop\n", evdev->name);
+                cpu_enable_idle_sleep();
+                break;
+        default:
+                BUG();
+        }
 }
 
-irqreturn_t __weak timer_interrupt(int irq, void *dev_id)
-{
-        /* ack timer interrupt and try to set next interrupt */
-        avr32_timer_ack();
-
-        /*
-         * Call the generic timer interrupt handler
-         */
-        write_seqlock(&xtime_lock);
-        do_timer(1);
-        write_sequnlock(&xtime_lock);
-
-        /*
-         * In UP mode, we call local_timer_interrupt() to do profiling
-         * and process accounting.
-         *
-         * SMP is not supported yet.
-         */
-        local_timer_interrupt(irq, dev_id);
-
-        return IRQ_HANDLED;
-}
+static struct clock_event_device comparator = {
+        .name           = "avr32_comparator",
+        .features       = CLOCK_EVT_FEAT_ONESHOT,
+        .shift          = 16,
+        .rating         = 50,
+        .cpumask        = CPU_MASK_CPU0,
+        .set_next_event = comparator_next_event,
+        .set_mode       = comparator_mode,
+};
 
 void __init time_init(void)
 {
+        unsigned long counter_hz;
         int ret;
 
-        /*
-         * Make sure we don't get any COMPARE interrupts before we can
-         * handle them.
-         */
-        sysreg_write(COMPARE, 0);
-
         xtime.tv_sec = mktime(2007, 1, 1, 0, 0, 0);
         xtime.tv_nsec = 0;
 
         set_normalized_timespec(&wall_to_monotonic,
                         -xtime.tv_sec, -xtime.tv_nsec);
 
-        ret = avr32_hpt_init();
-        if (ret) {
-                pr_debug("timer: failed setup: %d\n", ret);
-                return;
-        }
+        /* figure rate for counter */
+        counter_hz = clk_get_rate(boot_cpu_data.clk);
+        counter.mult = clocksource_hz2mult(counter_hz, counter.shift);
 
-        ret = clocksource_register(&clocksource_avr32);
+        ret = clocksource_register(&counter);
         if (ret)
                 pr_debug("timer: could not register clocksource: %d\n", ret);
 
-        ret = avr32_hpt_start();
-        if (ret) {
-                pr_debug("timer: failed starting: %d\n", ret);
-                return;
+        /* setup COMPARE clockevent */
+        comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
+        comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
+        comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
+
+        sysreg_write(COMPARE, 0);
+        timer_irqaction.dev_id = &comparator;
+
+        ret = setup_irq(0, &timer_irqaction);
+        if (ret)
+                pr_debug("timer: could not request IRQ 0: %d\n", ret);
+        else {
+                clockevents_register_device(&comparator);
+
+                pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
+                                ((counter_hz + 500) / 1000) / 1000,
+                                ((counter_hz + 500) / 1000) % 1000);
         }
 }
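
Note on the arithmetic in this patch (not part of the commit): comparator_next_event() arms COMPARE at COUNT + delta and falls back to 1 because a COMPARE value of 0 disables the compare interrupt, while time_init() turns the counter rate into the clockevent's mult/shift pair with div_sc() and derives min/max deltas with clockevent_delta2ns(). The standalone sketch below reproduces that fixed-point math in user space under stated assumptions; the helper names (hz_to_mult, ns_to_cycles, arm_compare) and the 140 MHz example clock are made up for illustration and are not part of the kernel code above.

/*
 * Standalone sketch of the mult/shift scaling used by the clockevent
 * conversion.  div_sc(counter_hz, NSEC_PER_SEC, shift) yields a
 * "cycles per nanosecond" factor scaled by 2^shift; the clockevents
 * core later converts a nanosecond delta back to cycles with
 * (delta_ns * mult) >> shift before calling set_next_event().
 */
#include <stdint.h>
#include <stdio.h>

#define SHIFT 16u   /* same .shift as the comparator clockevent above */

/* Roughly what div_sc(hz, NSEC_PER_SEC, SHIFT) computes. */
static uint32_t hz_to_mult(uint32_t hz, unsigned int shift)
{
        return (uint32_t)(((uint64_t)hz << shift) / 1000000000ULL);
}

/* Nanoseconds -> COUNT cycles, using the scaled factor. */
static uint32_t ns_to_cycles(uint64_t ns, uint32_t mult, unsigned int shift)
{
        return (uint32_t)((ns * mult) >> shift);
}

/*
 * Model of comparator_next_event(): arm COMPARE at COUNT + delta, but
 * never write 0, since COMPARE == 0 disables the compare interrupt
 * (this is what the "?: 1" in the patch guards against).
 */
static uint32_t arm_compare(uint32_t count, uint32_t delta)
{
        uint32_t compare = count + delta;   /* wraps modulo 2^32, like COUNT */

        return compare ? compare : 1;
}

int main(void)
{
        uint32_t hz = 140000000;            /* assumed CPU clock, demo only */
        uint32_t mult = hz_to_mult(hz, SHIFT);
        uint32_t delta = ns_to_cycles(1000000, mult, SHIFT);   /* 1 ms event */

        printf("mult = %u\n", (unsigned)mult);
        printf("1 ms = %u cycles\n", (unsigned)delta);
        printf("COMPARE near wrap: %u\n",
               (unsigned)arm_compare(UINT32_MAX - 100, delta));
        return 0;
}

With the example rate, mult comes out to about 9175, so a 1 ms request maps to roughly 140 000 COUNT cycles, comfortably above the floor that min_delta_ns = clockevent_delta2ns(50, ...) + 1 imposes (about 360 ns at 140 MHz).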