aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/kernel/time_32.c
diff options
context:
space:
mode:
authorTkhai Kirill <tkhai@yandex.ru>2012-04-04 15:49:26 -0400
committerDavid S. Miller <davem@davemloft.net>2012-04-15 13:28:50 -0400
commit62f082830d63cf753ed0dab16f8d3b2d0ffc7f43 (patch)
tree39770d13d3dbff835eb3500c6a913da5c784fec3 /arch/sparc/kernel/time_32.c
parent472bc4f2ad164a5aac2e85d891c4faecfc5d62c4 (diff)
sparc32: generic clockevent support
The kernel uses l14 timers as clockevents. The l10 timer is used as a clocksource if the platform's master_l10_counter isn't constantly zero. The clocksource is continuous, so it's possible to use high-resolution timers. The l10 timer is also used as a clockevent on UP configurations. This implementation covers the sun4m, sun4d, sun4c, microsparc-IIep and LEON platforms. The appropriate LEON changes were made by Konrad Eisele. In the case of sun4m's oneshot mode, the profile irq is zeroed in smp4m_percpu_timer_interrupt(). This may be needless (double, triple etc. overflow does nothing). sun4d is able to have oneshot mode too, but I don't have any way to test it, so the code of its percpu timer handler is kept as close to the current code as possible. The patch was tested on a sun4m box in SMP mode by me, and tested by Konrad on LEON in UP mode (LEON SMP is broken atm - due to other reasons). Signed-off-by: Tkhai Kirill <tkhai@yandex.ru> Tested-by: Konrad Eisele <konrad@gaisler.com> [leon up] [sam: revised patch to provide generic support for leon] Signed-off-by: Sam Ravnborg <sam@ravnborg.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/time_32.c')
-rw-r--r--arch/sparc/kernel/time_32.c215
1 file changed, 176 insertions, 39 deletions
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 68e0284bf3f3..89e890bc0941 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -26,6 +26,8 @@
26#include <linux/rtc.h> 26#include <linux/rtc.h>
27#include <linux/rtc/m48t59.h> 27#include <linux/rtc/m48t59.h>
28#include <linux/timex.h> 28#include <linux/timex.h>
29#include <linux/clocksource.h>
30#include <linux/clockchips.h>
29#include <linux/init.h> 31#include <linux/init.h>
30#include <linux/pci.h> 32#include <linux/pci.h>
31#include <linux/ioport.h> 33#include <linux/ioport.h>
@@ -44,9 +46,21 @@
44#include <asm/page.h> 46#include <asm/page.h>
45#include <asm/pcic.h> 47#include <asm/pcic.h>
46#include <asm/irq_regs.h> 48#include <asm/irq_regs.h>
49#include <asm/setup.h>
47 50
48#include "irq.h" 51#include "irq.h"
49 52
53static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
54static __volatile__ u64 timer_cs_internal_counter = 0;
55static char timer_cs_enabled = 0;
56
57static struct clock_event_device timer_ce;
58static char timer_ce_enabled = 0;
59
60#ifdef CONFIG_SMP
61DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
62#endif
63
50DEFINE_SPINLOCK(rtc_lock); 64DEFINE_SPINLOCK(rtc_lock);
51EXPORT_SYMBOL(rtc_lock); 65EXPORT_SYMBOL(rtc_lock);
52 66
@@ -75,36 +89,167 @@ EXPORT_SYMBOL(profile_pc);
75 89
76__volatile__ unsigned int *master_l10_counter; 90__volatile__ unsigned int *master_l10_counter;
77 91
78u32 (*do_arch_gettimeoffset)(void);
79
80int update_persistent_clock(struct timespec now) 92int update_persistent_clock(struct timespec now)
81{ 93{
82 return set_rtc_mmss(now.tv_sec); 94 return set_rtc_mmss(now.tv_sec);
83} 95}
84 96
85/* 97irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
86 * timer_interrupt() needs to keep up the real-time clock, 98{
87 * as well as call the "xtime_update()" routine every clocktick 99 if (timer_cs_enabled) {
88 */ 100 write_seqlock(&timer_cs_lock);
101 timer_cs_internal_counter++;
102 clear_clock_irq();
103 write_sequnlock(&timer_cs_lock);
104 } else {
105 clear_clock_irq();
106 }
89 107
90#define TICK_SIZE (tick_nsec / 1000) 108 if (timer_ce_enabled)
109 timer_ce.event_handler(&timer_ce);
91 110
92static irqreturn_t timer_interrupt(int dummy, void *dev_id) 111 return IRQ_HANDLED;
112}
113
114static void timer_ce_set_mode(enum clock_event_mode mode,
115 struct clock_event_device *evt)
93{ 116{
94#ifndef CONFIG_SMP 117 switch (mode) {
95 profile_tick(CPU_PROFILING); 118 case CLOCK_EVT_MODE_PERIODIC:
96#endif 119 case CLOCK_EVT_MODE_RESUME:
120 timer_ce_enabled = 1;
121 break;
122 case CLOCK_EVT_MODE_SHUTDOWN:
123 timer_ce_enabled = 0;
124 break;
125 default:
126 break;
127 }
128 smp_mb();
129}
130
131static __init void setup_timer_ce(void)
132{
133 struct clock_event_device *ce = &timer_ce;
134
135 BUG_ON(smp_processor_id() != boot_cpu_id);
136
137 ce->name = "timer_ce";
138 ce->rating = 100;
139 ce->features = CLOCK_EVT_FEAT_PERIODIC;
140 ce->set_mode = timer_ce_set_mode;
141 ce->cpumask = cpu_possible_mask;
142 ce->shift = 32;
143 ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
144 ce->shift);
145 clockevents_register_device(ce);
146}
97 147
98 clear_clock_irq(); 148static unsigned int sbus_cycles_offset(void)
149{
150 unsigned int val, offset;
99 151
100 xtime_update(1); 152 val = *master_l10_counter;
153 offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
101 154
102#ifndef CONFIG_SMP 155 /* Limit hit? */
103 update_process_times(user_mode(get_irq_regs())); 156 if (val & TIMER_LIMIT_BIT)
104#endif 157 offset += sparc_config.cs_period;
105 return IRQ_HANDLED; 158
159 return offset;
106} 160}
107 161
162static cycle_t timer_cs_read(struct clocksource *cs)
163{
164 unsigned int seq, offset;
165 u64 cycles;
166
167 do {
168 seq = read_seqbegin(&timer_cs_lock);
169
170 cycles = timer_cs_internal_counter;
171 offset = sparc_config.get_cycles_offset();
172 } while (read_seqretry(&timer_cs_lock, seq));
173
174 /* Count absolute cycles */
175 cycles *= sparc_config.cs_period;
176 cycles += offset;
177
178 return cycles;
179}
180
181static struct clocksource timer_cs = {
182 .name = "timer_cs",
183 .rating = 100,
184 .read = timer_cs_read,
185 .mask = CLOCKSOURCE_MASK(64),
186 .shift = 2,
187 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
188};
189
190static __init int setup_timer_cs(void)
191{
192 timer_cs_enabled = 1;
193 timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
194 timer_cs.shift);
195
196 return clocksource_register(&timer_cs);
197}
198
199#ifdef CONFIG_SMP
200static void percpu_ce_setup(enum clock_event_mode mode,
201 struct clock_event_device *evt)
202{
203 int cpu = __first_cpu(evt->cpumask);
204
205 switch (mode) {
206 case CLOCK_EVT_MODE_PERIODIC:
207 load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
208 break;
209 case CLOCK_EVT_MODE_ONESHOT:
210 case CLOCK_EVT_MODE_SHUTDOWN:
211 case CLOCK_EVT_MODE_UNUSED:
212 load_profile_irq(cpu, 0);
213 break;
214 default:
215 break;
216 }
217}
218
219static int percpu_ce_set_next_event(unsigned long delta,
220 struct clock_event_device *evt)
221{
222 int cpu = __first_cpu(evt->cpumask);
223 unsigned int next = (unsigned int)delta;
224
225 load_profile_irq(cpu, next);
226 return 0;
227}
228
229void register_percpu_ce(int cpu)
230{
231 struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
232 unsigned int features = CLOCK_EVT_FEAT_PERIODIC;
233
234 if (sparc_config.features & FEAT_L14_ONESHOT)
235 features |= CLOCK_EVT_FEAT_ONESHOT;
236
237 ce->name = "percpu_ce";
238 ce->rating = 200;
239 ce->features = features;
240 ce->set_mode = percpu_ce_setup;
241 ce->set_next_event = percpu_ce_set_next_event;
242 ce->cpumask = cpumask_of(cpu);
243 ce->shift = 32;
244 ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
245 ce->shift);
246 ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce);
247 ce->min_delta_ns = clockevent_delta2ns(100, ce);
248
249 clockevents_register_device(ce);
250}
251#endif
252
108static unsigned char mostek_read_byte(struct device *dev, u32 ofs) 253static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
109{ 254{
110 struct platform_device *pdev = to_platform_device(dev); 255 struct platform_device *pdev = to_platform_device(dev);
@@ -195,38 +340,30 @@ static int __init clock_init(void)
195 */ 340 */
196fs_initcall(clock_init); 341fs_initcall(clock_init);
197 342
198 343static void __init sparc32_late_time_init(void)
199u32 sbus_do_gettimeoffset(void)
200{ 344{
201 unsigned long val = *master_l10_counter; 345 if (sparc_config.features & FEAT_L10_CLOCKEVENT)
202 unsigned long usec = (val >> 10) & 0x1fffff; 346 setup_timer_ce();
203 347 if (sparc_config.features & FEAT_L10_CLOCKSOURCE)
204 /* Limit hit? */ 348 setup_timer_cs();
205 if (val & 0x80000000) 349#ifdef CONFIG_SMP
206 usec += 1000000 / HZ; 350 register_percpu_ce(smp_processor_id());
207 351#endif
208 return usec * 1000;
209} 352}
210 353
211 354static void __init sbus_time_init(void)
212u32 arch_gettimeoffset(void)
213{ 355{
214 if (unlikely(!do_arch_gettimeoffset)) 356 sparc_config.get_cycles_offset = sbus_cycles_offset;
215 return 0; 357 sparc_config.init_timers();
216 return do_arch_gettimeoffset();
217} 358}
218 359
219static void __init sbus_time_init(void) 360void __init time_init(void)
220{ 361{
221 do_arch_gettimeoffset = sbus_do_gettimeoffset;
222
223 btfixup(); 362 btfixup();
224 363
225 sparc_config.init_timers(timer_interrupt); 364 sparc_config.features = 0;
226} 365 late_time_init = sparc32_late_time_init;
227 366
228void __init time_init(void)
229{
230 if (pcic_present()) 367 if (pcic_present())
231 pci_time_init(); 368 pci_time_init();
232 else 369 else