Diffstat (limited to 'arch/ia64/kernel/time.c'):

 arch/ia64/kernel/time.c | 96
 1 file changed, 88 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 3486fe7d6e65..627785c48ea9 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/efi.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
 
 #include <asm/machvec.h>
 #include <asm/delay.h>
@@ -28,6 +29,16 @@
 #include <asm/sections.h>
 #include <asm/system.h>
 
+#include "fsyscall_gtod_data.h"
+
+static cycle_t itc_get_cycles(void);
+
+struct fsyscall_gtod_data_t fsyscall_gtod_data = {
+	.lock = SEQLOCK_UNLOCKED,
+};
+
+struct itc_jitter_data_t itc_jitter_data;
+
 volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
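The companion header fsyscall_gtod_data.h introduced by this patch is not shown in this diff. Judging only from the fields this file reads and writes (the seqlock, the clk_* copies made in update_vsyscall() below, and the itc_jitter/itc_lastcycle pair used by itc_get_cycles()), it presumably declares something along these lines; this is a reconstruction for orientation, not the actual header, and the real field types and order may differ:

/* Reconstructed from the accesses in this file; the real header may differ. */
struct fsyscall_gtod_data_t {
	seqlock_t	lock;		/* guards every field below */
	struct timespec	wall_time;	/* wall time at the last update_vsyscall() */
	struct timespec	monotonic_time;	/* wall_time + wall_to_monotonic */
	cycle_t		clk_mask;	/* snapshot of the clocksource fields */
	u32		clk_mult;
	u32		clk_shift;
	void		*clk_fsys_mmio;
	cycle_t		clk_cycle_last;
};

struct itc_jitter_data_t {
	int		itc_jitter;	/* set when per-CPU ITCs may disagree */
	cycle_t		itc_lastcycle;	/* last value returned, see itc_get_cycles() */
};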
@@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip);
 
 #endif
 
-static struct time_interpolator itc_interpolator = {
-	.shift = 16,
-	.mask = 0xffffffffffffffffLL,
-	.source = TIME_SOURCE_CPU
+static struct clocksource clocksource_itc = {
+	.name = "itc",
+	.rating = 350,
+	.read = itc_get_cycles,
+	.mask = 0xffffffffffffffff,
+	.mult = 0, /* to be calculated */
+	.shift = 16,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
+static struct clocksource *itc_clocksource;
 
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
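The mult/shift pair in clocksource_itc is how the generic timekeeping core converts ITC ticks into nanoseconds: ns = (cycles * mult) >> shift. mult is left at 0 here because it depends on the ITC frequency, which is only known once ia64_init_itm() has run; the clocksource_hz2mult() call added below fills it in. A minimal standalone sketch of that arithmetic, using a hypothetical 400 MHz ITC (the real value comes from local_cpu_data->itc_freq):

/*
 * Standalone illustration (not kernel code) of the mult/shift scheme used
 * by struct clocksource above.  The frequency is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same computation as the kernel's clocksource_hz2mult() helper. */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t shift = 16;			/* matches clocksource_itc.shift */
	uint32_t itc_freq = 400000000;		/* hypothetical 400 MHz ITC */
	uint32_t mult = hz2mult(itc_freq, shift);

	/* The timekeeping core converts a cycle delta to nanoseconds as: */
	uint64_t cycles = itc_freq;		/* one second worth of ticks */
	uint64_t ns = (cycles * mult) >> shift;

	printf("mult=%u, 1s of cycles -> %llu ns\n",
	       mult, (unsigned long long)ns);
	return 0;
}

A larger shift gives finer granularity for mult, at the cost of shrinking the largest cycle delta whose multiplication still fits in 64 bits, which is why the driver fixes shift and derives mult from the measured frequency.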
@@ -210,8 +226,6 @@ ia64_init_itm (void)
 					+ itc_freq/2)/itc_freq;
 
 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-		itc_interpolator.frequency = local_cpu_data->itc_freq;
-		itc_interpolator.drift = itc_drift;
 #ifdef CONFIG_SMP
 		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
 		 * Jitter compensation requires a cmpxchg which may limit
@@ -223,15 +237,50 @@ ia64_init_itm (void)
 		 * even going backward) if the ITC offsets between the individual CPUs
 		 * are too large.
 		 */
-		if (!nojitter) itc_interpolator.jitter = 1;
+		if (!nojitter)
+			itc_jitter_data.itc_jitter = 1;
 #endif
-		register_time_interpolator(&itc_interpolator);
 	}
 
 	/* Setup the CPU local timer tick */
 	ia64_cpu_local_tick();
+
+	if (!itc_clocksource) {
+		/* Sort out mult/shift values: */
+		clocksource_itc.mult =
+			clocksource_hz2mult(local_cpu_data->itc_freq,
+					    clocksource_itc.shift);
+		clocksource_register(&clocksource_itc);
+		itc_clocksource = &clocksource_itc;
+	}
 }
 
+static cycle_t itc_get_cycles(void)
+{
+	u64 lcycle, now, ret;
+
+	if (!itc_jitter_data.itc_jitter)
+		return get_cycles();
+
+	lcycle = itc_jitter_data.itc_lastcycle;
+	now = get_cycles();
+	if (lcycle && time_after(lcycle, now))
+		return lcycle;
+
+	/*
+	 * Keep track of the last timer value returned.
+	 * In an SMP environment, you could lose out in contention of
+	 * cmpxchg. If so, your cmpxchg returns new value which the
+	 * winner of contention updated to. Use the new value instead.
+	 */
+	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
+	if (unlikely(ret != lcycle))
+		return ret;
+
+	return now;
+}
+
+
 static struct irqaction timer_irqaction = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED | IRQF_IRQPOLL,
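itc_get_cycles() above is the jitter filter the comment in ia64_init_itm() refers to: when per-CPU ITCs are slightly offset, a CPU whose counter is behind the last value already handed to the timekeeping core returns that last value instead, and the single cmpxchg() means the loser of a concurrent update simply adopts the winner's newer value rather than retrying. A standalone userspace sketch of the same pattern, using C11 atomics in place of the kernel's cmpxchg() and clock_gettime() as a stand-in for get_cycles() (illustrative only, not the kernel code):

/*
 * Userspace illustration of the monotonicity filter in itc_get_cycles(),
 * using C11 atomics instead of the kernel's cmpxchg().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the kernel's get_cycles(): a raw nanosecond counter. */
static uint64_t read_raw_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static _Atomic uint64_t last_cycle;	/* analogous to itc_jitter_data.itc_lastcycle */

static uint64_t read_monotonic_cycles(void)
{
	uint64_t last = atomic_load(&last_cycle);
	uint64_t now = read_raw_counter();

	/* A reader whose raw counter lags the last published value returns it. */
	if (last && (int64_t)(now - last) < 0)
		return last;

	/*
	 * Try to publish 'now'.  If another thread won the race, 'last' is
	 * rewritten with the value that thread published; return it, just as
	 * itc_get_cycles() returns the cmpxchg() result on contention.
	 */
	if (!atomic_compare_exchange_strong(&last_cycle, &last, now))
		return last;

	return now;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)read_monotonic_cycles());
	return 0;
}

The compare-and-swap on a shared word is the cost the comment in ia64_init_itm() warns about, which is why the filter is only armed when nojitter is not set.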
@@ -307,3 +356,34 @@ ia64_setup_printk_clock(void)
 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
 		ia64_printk_clock = ia64_itc_printk_clock;
 }
+
+void update_vsyscall(struct timespec *wall, struct clocksource *c)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+
+	/* copy fsyscall clock data */
+	fsyscall_gtod_data.clk_mask = c->mask;
+	fsyscall_gtod_data.clk_mult = c->mult;
+	fsyscall_gtod_data.clk_shift = c->shift;
+	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
+	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+
+	/* copy kernel time structures */
+	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
+	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
+	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+							+ wall->tv_sec;
+	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+							+ wall->tv_nsec;
+
+	/* normalize */
+	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
+		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
+		fsyscall_gtod_data.monotonic_time.tv_sec++;
+	}
+
+	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+}
+
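update_vsyscall() is only the write side: whenever the timekeeping core updates xtime it republishes the clocksource parameters and the wall/monotonic time under fsyscall_gtod_data.lock so that the ia64 fast-syscall gettimeofday path can compute the time without entering the kernel proper. That reader lives in hand-written assembly (fsys.S) and is not part of this diff; the C sketch below only illustrates the seqlock retry protocol and the arithmetic the exported fields support, assuming wall_time is a struct timespec as the stores above suggest and using get_cycles() for the clock read:

/* Sketch only: the real fast path is assembly in fsys.S. */
static struct timespec fsyscall_read_wall_time(void)
{
	struct timespec ts;
	unsigned int seq;
	u64 cycle_now, cycle_last, mask, ns;
	u32 mult, shift;

	do {
		seq = read_seqbegin(&fsyscall_gtod_data.lock);

		/* Snapshot the published clocksource parameters and time base. */
		cycle_last = fsyscall_gtod_data.clk_cycle_last;
		mask = fsyscall_gtod_data.clk_mask;
		mult = fsyscall_gtod_data.clk_mult;
		shift = fsyscall_gtod_data.clk_shift;
		ts = fsyscall_gtod_data.wall_time;

		cycle_now = get_cycles();	/* or a load through clk_fsys_mmio */
	} while (read_seqretry(&fsyscall_gtod_data.lock, seq));

	/* Advance the snapshot by the cycles elapsed since the last update. */
	ns = ((cycle_now - cycle_last) & mask) * mult >> shift;
	ts.tv_nsec += ns;
	while (ts.tv_nsec >= NSEC_PER_SEC) {
		ts.tv_nsec -= NSEC_PER_SEC;
		ts.tv_sec++;
	}
	return ts;
}

Readers never block the writer; they simply retry if the sequence count changed underneath them, which is why the short irqsave critical section in update_vsyscall() is all the locking the write side needs.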