-rw-r--r--  arch/i386/Kconfig                         |   8
-rw-r--r--  arch/i386/kernel/Makefile                 |   1
-rw-r--r--  arch/i386/kernel/apic.c                   | 291
-rw-r--r--  arch/i386/kernel/hpet.c                   | 496
-rw-r--r--  arch/i386/kernel/i8253.c                  |  96
-rw-r--r--  arch/i386/kernel/i8259.c                  |   6
-rw-r--r--  arch/i386/kernel/smpboot.c                |   5
-rw-r--r--  arch/i386/kernel/time.c                   |  70
-rw-r--r--  arch/i386/kernel/time_hpet.c              | 497
-rw-r--r--  arch/i386/mach-default/setup.c            |   8
-rw-r--r--  drivers/acpi/processor_idle.c             |  38
-rw-r--r--  include/asm-i386/apic.h                   |   5
-rw-r--r--  include/asm-i386/hpet.h                   |  16
-rw-r--r--  include/asm-i386/i8253.h                  |  15
-rw-r--r--  include/asm-i386/mach-default/do_timer.h  |  78
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h  |  27
-rw-r--r--  include/asm-i386/mpspec.h                 |   1
17 files changed, 811 insertions(+), 847 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 458b3aad3eb3..490be6f77bbf 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -22,6 +22,14 @@ config CLOCKSOURCE_WATCHDOG
22 bool 22 bool
23 default y 23 default y
24 24
25config GENERIC_CLOCKEVENTS
26 bool
27 default y
28
29config GENERIC_CLOCKEVENTS_BROADCAST
30 bool
31 default y
32
25config LOCKDEP_SUPPORT 33config LOCKDEP_SUPPORT
26 bool 34 bool
27 default y 35 default y
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index c2b3b79dc436..4ae3dcf1d2f0 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
32obj-$(CONFIG_MODULES) += module.o 32obj-$(CONFIG_MODULES) += module.o
33obj-y += sysenter.o vsyscall.o 33obj-y += sysenter.o vsyscall.o
34obj-$(CONFIG_ACPI_SRAT) += srat.o 34obj-$(CONFIG_ACPI_SRAT) += srat.o
35obj-$(CONFIG_HPET_TIMER) += time_hpet.o
36obj-$(CONFIG_EFI) += efi.o efi_stub.o 35obj-$(CONFIG_EFI) += efi.o efi_stub.o
37obj-$(CONFIG_DOUBLEFAULT) += doublefault.o 36obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
38obj-$(CONFIG_VM86) += vm86.o 37obj-$(CONFIG_VM86) += vm86.o
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index b56448f214a7..e98b5c750bdf 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -25,6 +25,7 @@
25#include <linux/kernel_stat.h> 25#include <linux/kernel_stat.h>
26#include <linux/sysdev.h> 26#include <linux/sysdev.h>
27#include <linux/cpu.h> 27#include <linux/cpu.h>
28#include <linux/clockchips.h>
28#include <linux/module.h> 29#include <linux/module.h>
29 30
30#include <asm/atomic.h> 31#include <asm/atomic.h>
@@ -52,28 +53,44 @@
52#endif 53#endif
53 54
54/* 55/*
55 * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
56 * IPIs in place of local APIC timers
57 */
58static cpumask_t timer_bcast_ipi;
59
60/*
61 * Knob to control our willingness to enable the local APIC. 56 * Knob to control our willingness to enable the local APIC.
62 * 57 *
63 * -1=force-disable, +1=force-enable 58 * -1=force-disable, +1=force-enable
64 */ 59 */
65static int enable_local_apic __initdata = 0; 60static int enable_local_apic __initdata = 0;
66 61
62/* Enable local APIC timer for highres/dyntick on UP */
63static int enable_local_apic_timer __initdata = 0;
64
67/* 65/*
68 * Debug level, exported for io_apic.c 66 * Debug level, exported for io_apic.c
69 */ 67 */
70int apic_verbosity; 68int apic_verbosity;
71 69
72static void apic_pm_activate(void); 70static unsigned int calibration_result;
73 71
72static int lapic_next_event(unsigned long delta,
73 struct clock_event_device *evt);
74static void lapic_timer_setup(enum clock_event_mode mode,
75 struct clock_event_device *evt);
76static void lapic_timer_broadcast(cpumask_t mask);
77static void apic_pm_activate(void);
74 78
75/* Using APIC to generate smp_local_timer_interrupt? */ 79/*
76int using_apic_timer __read_mostly = 0; 80 * The local apic timer can be used for any function which is CPU local.
81 */
82static struct clock_event_device lapic_clockevent = {
83 .name = "lapic",
84 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
85 | CLOCK_EVT_FEAT_C3STOP,
86 .shift = 32,
87 .set_mode = lapic_timer_setup,
88 .set_next_event = lapic_next_event,
89 .broadcast = lapic_timer_broadcast,
90 .rating = 100,
91 .irq = -1,
92};
93static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
77 94
78/* Local APIC was disabled by the BIOS and enabled by the kernel */ 95/* Local APIC was disabled by the BIOS and enabled by the kernel */
79static int enabled_via_apicbase; 96static int enabled_via_apicbase;
@@ -152,6 +169,11 @@ int lapic_get_maxlvt(void)
152 */ 169 */
153 170
154/* 171/*
172 * FIXME: Move this to i8253.h. There is no need to keep the access to
173 * the PIT scattered all around the place -tglx
174 */
175
176/*
155 * The timer chip is already set up at HZ interrupts per second here, 177 * The timer chip is already set up at HZ interrupts per second here,
156 * but we do not accept timer interrupts yet. We only allow the BP 178 * but we do not accept timer interrupts yet. We only allow the BP
157 * to calibrate. 179 * to calibrate.
@@ -209,16 +231,17 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
209 231
210#define APIC_DIVISOR 16 232#define APIC_DIVISOR 16
211 233
212static void __setup_APIC_LVTT(unsigned int clocks) 234static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
213{ 235{
214 unsigned int lvtt_value, tmp_value; 236 unsigned int lvtt_value, tmp_value;
215 int cpu = smp_processor_id();
216 237
217 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; 238 lvtt_value = LOCAL_TIMER_VECTOR;
239 if (!oneshot)
240 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
218 if (!lapic_is_integrated()) 241 if (!lapic_is_integrated())
219 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); 242 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
220 243
221 if (cpu_isset(cpu, timer_bcast_ipi)) 244 if (!irqen)
222 lvtt_value |= APIC_LVT_MASKED; 245 lvtt_value |= APIC_LVT_MASKED;
223 246
224 apic_write_around(APIC_LVTT, lvtt_value); 247 apic_write_around(APIC_LVTT, lvtt_value);
@@ -231,31 +254,80 @@ static void __setup_APIC_LVTT(unsigned int clocks)
231 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) 254 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
232 | APIC_TDR_DIV_16); 255 | APIC_TDR_DIV_16);
233 256
234 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR); 257 if (!oneshot)
258 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
259}
260
261/*
262 * Program the next event, relative to now
263 */
264static int lapic_next_event(unsigned long delta,
265 struct clock_event_device *evt)
266{
267 apic_write_around(APIC_TMICT, delta);
268 return 0;
235} 269}
236 270
237static void __devinit setup_APIC_timer(unsigned int clocks) 271/*
272 * Setup the lapic timer in periodic or oneshot mode
273 */
274static void lapic_timer_setup(enum clock_event_mode mode,
275 struct clock_event_device *evt)
238{ 276{
239 unsigned long flags; 277 unsigned long flags;
278 unsigned int v;
240 279
241 local_irq_save(flags); 280 local_irq_save(flags);
242 281
243 /* 282 switch (mode) {
244 * Wait for IRQ0's slice: 283 case CLOCK_EVT_MODE_PERIODIC:
245 */ 284 case CLOCK_EVT_MODE_ONESHOT:
246 wait_timer_tick(); 285 __setup_APIC_LVTT(calibration_result,
247 286 mode != CLOCK_EVT_MODE_PERIODIC, 1);
248 __setup_APIC_LVTT(clocks); 287 break;
288 case CLOCK_EVT_MODE_UNUSED:
289 case CLOCK_EVT_MODE_SHUTDOWN:
290 v = apic_read(APIC_LVTT);
291 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
292 apic_write_around(APIC_LVTT, v);
293 break;
294 }
249 295
250 local_irq_restore(flags); 296 local_irq_restore(flags);
251} 297}
252 298
253/* 299/*
300 * Local APIC timer broadcast function
301 */
302static void lapic_timer_broadcast(cpumask_t mask)
303{
304#ifdef CONFIG_SMP
305 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
306#endif
307}
308
309/*
310 * Setup the local APIC timer for this CPU. Copy the initialized values
311 * of the boot CPU and register the clock event in the framework.
312 */
313static void __devinit setup_APIC_timer(void)
314{
315 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
316
317 memcpy(levt, &lapic_clockevent, sizeof(*levt));
318 levt->cpumask = cpumask_of_cpu(smp_processor_id());
319
320 clockevents_register_device(levt);
321}
322
323/*
254 * In this function we calibrate APIC bus clocks to the external 324 * In this function we calibrate APIC bus clocks to the external
255 * timer. Unfortunately we cannot use jiffies and the timer irq 325 * timer. Unfortunately we cannot use jiffies and the timer irq
256 * to calibrate, since some later bootup code depends on getting 326 * to calibrate, since some later bootup code depends on getting
257 * the first irq? Ugh. 327 * the first irq? Ugh.
258 * 328 *
329 * TODO: Fix this rather than saying "Ugh" -tglx
330 *
259 * We want to do the calibration only once since we 331 * We want to do the calibration only once since we
260 * want to have local timer irqs syncron. CPUs connected 332 * want to have local timer irqs syncron. CPUs connected
261 * by the same APIC bus have the very same bus frequency. 333 * by the same APIC bus have the very same bus frequency.
@@ -278,7 +350,7 @@ static int __init calibrate_APIC_clock(void)
278 * value into the APIC clock, we just want to get the 350 * value into the APIC clock, we just want to get the
279 * counter running for calibration. 351 * counter running for calibration.
280 */ 352 */
281 __setup_APIC_LVTT(1000000000); 353 __setup_APIC_LVTT(1000000000, 0, 0);
282 354
283 /* 355 /*
284 * The timer chip counts down to zero. Let's wait 356 * The timer chip counts down to zero. Let's wait
@@ -315,6 +387,17 @@ static int __init calibrate_APIC_clock(void)
315 387
316 result = (tt1-tt2)*APIC_DIVISOR/LOOPS; 388 result = (tt1-tt2)*APIC_DIVISOR/LOOPS;
317 389
390 /* Calculate the scaled math multiplication factor */
391 lapic_clockevent.mult = div_sc(tt1-tt2, TICK_NSEC * LOOPS, 32);
392 lapic_clockevent.max_delta_ns =
393 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
394 lapic_clockevent.min_delta_ns =
395 clockevent_delta2ns(0xF, &lapic_clockevent);
396
397 apic_printk(APIC_VERBOSE, "..... tt1-tt2 %ld\n", tt1 - tt2);
398 apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
399 apic_printk(APIC_VERBOSE, "..... calibration result: %ld\n", result);
400
318 if (cpu_has_tsc) 401 if (cpu_has_tsc)
319 apic_printk(APIC_VERBOSE, "..... CPU clock speed is " 402 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
320 "%ld.%04ld MHz.\n", 403 "%ld.%04ld MHz.\n",
@@ -329,13 +412,10 @@ static int __init calibrate_APIC_clock(void)
329 return result; 412 return result;
330} 413}
331 414
332static unsigned int calibration_result;
333
334void __init setup_boot_APIC_clock(void) 415void __init setup_boot_APIC_clock(void)
335{ 416{
336 unsigned long flags; 417 unsigned long flags;
337 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); 418 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
338 using_apic_timer = 1;
339 419
340 local_irq_save(flags); 420 local_irq_save(flags);
341 421
@@ -343,97 +423,47 @@ void __init setup_boot_APIC_clock(void)
343 /* 423 /*
344 * Now set up the timer for real. 424 * Now set up the timer for real.
345 */ 425 */
346 setup_APIC_timer(calibration_result); 426 setup_APIC_timer();
347 427
348 local_irq_restore(flags); 428 local_irq_restore(flags);
349} 429}
350 430
351void __devinit setup_secondary_APIC_clock(void) 431void __devinit setup_secondary_APIC_clock(void)
352{ 432{
353 setup_APIC_timer(calibration_result); 433 setup_APIC_timer();
354}
355
356void disable_APIC_timer(void)
357{
358 if (using_apic_timer) {
359 unsigned long v;
360
361 v = apic_read(APIC_LVTT);
362 /*
363 * When an illegal vector value (0-15) is written to an LVT
364 * entry and delivery mode is Fixed, the APIC may signal an
365 * illegal vector error, with out regard to whether the mask
366 * bit is set or whether an interrupt is actually seen on
367 * input.
368 *
369 * Boot sequence might call this function when the LVTT has
370 * '0' vector value. So make sure vector field is set to
371 * valid value.
372 */
373 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
374 apic_write_around(APIC_LVTT, v);
375 }
376}
377
378void enable_APIC_timer(void)
379{
380 int cpu = smp_processor_id();
381
382 if (using_apic_timer && !cpu_isset(cpu, timer_bcast_ipi)) {
383 unsigned long v;
384
385 v = apic_read(APIC_LVTT);
386 apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
387 }
388}
389
390void switch_APIC_timer_to_ipi(void *cpumask)
391{
392 cpumask_t mask = *(cpumask_t *)cpumask;
393 int cpu = smp_processor_id();
394
395 if (cpu_isset(cpu, mask) &&
396 !cpu_isset(cpu, timer_bcast_ipi)) {
397 disable_APIC_timer();
398 cpu_set(cpu, timer_bcast_ipi);
399 }
400}
401EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
402
403void switch_ipi_to_APIC_timer(void *cpumask)
404{
405 cpumask_t mask = *(cpumask_t *)cpumask;
406 int cpu = smp_processor_id();
407
408 if (cpu_isset(cpu, mask) &&
409 cpu_isset(cpu, timer_bcast_ipi)) {
410 cpu_clear(cpu, timer_bcast_ipi);
411 enable_APIC_timer();
412 }
413} 434}
414EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
415 435
416/* 436/*
417 * Local timer interrupt handler. It does both profiling and 437 * The guts of the apic timer interrupt
418 * process statistics/rescheduling.
419 */ 438 */
420inline void smp_local_timer_interrupt(void) 439static void local_apic_timer_interrupt(void)
421{ 440{
422 profile_tick(CPU_PROFILING); 441 int cpu = smp_processor_id();
423#ifdef CONFIG_SMP 442 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
424 update_process_times(user_mode_vm(get_irq_regs()));
425#endif
426 443
427 /* 444 /*
428 * We take the 'long' return path, and there every subsystem 445 * Normally we should not be here till LAPIC has been
429 * grabs the apropriate locks (kernel lock/ irq lock). 446 * initialized but in some cases like kdump, it's possible that
447 * there is a pending LAPIC timer interrupt from previous
448 * kernel's context and is delivered in new kernel the moment
449 * interrupts are enabled.
430 * 450 *
431 * we might want to decouple profiling from the 'long path', 451 * Interrupts are enabled early and LAPIC is setup much later,
432 * and do the profiling totally in assembly. 452 * hence it's possible that when we get here evt->event_handler
433 * 453 * is NULL. Check for event_handler being NULL and discard
434 * Currently this isn't too much of an issue (performance wise), 454 * the interrupt as spurious.
435 * we can take more than 100K local irqs per second on a 100 MHz P5.
436 */ 455 */
456 if (!evt->event_handler) {
457 printk(KERN_WARNING
458 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
459 /* Switch it off */
460 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
461 return;
462 }
463
464 per_cpu(irq_stat, cpu).apic_timer_irqs++;
465
466 evt->event_handler(evt);
437} 467}
438 468
439/* 469/*
@@ -445,15 +475,9 @@ inline void smp_local_timer_interrupt(void)
445 * interrupt as well. Thus we cannot inline the local irq ... ] 475 * interrupt as well. Thus we cannot inline the local irq ... ]
446 */ 476 */
447 477
448fastcall void smp_apic_timer_interrupt(struct pt_regs *regs) 478void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
449{ 479{
450 struct pt_regs *old_regs = set_irq_regs(regs); 480 struct pt_regs *old_regs = set_irq_regs(regs);
451 int cpu = smp_processor_id();
452
453 /*
454 * the NMI deadlock-detector uses this.
455 */
456 per_cpu(irq_stat, cpu).apic_timer_irqs++;
457 481
458 /* 482 /*
459 * NOTE! We'd better ACK the irq immediately, 483 * NOTE! We'd better ACK the irq immediately,
@@ -467,41 +491,10 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
467 */ 491 */
468 exit_idle(); 492 exit_idle();
469 irq_enter(); 493 irq_enter();
470 smp_local_timer_interrupt(); 494 local_apic_timer_interrupt();
471 irq_exit(); 495 irq_exit();
472 set_irq_regs(old_regs);
473}
474 496
475#ifndef CONFIG_SMP 497 set_irq_regs(old_regs);
476static void up_apic_timer_interrupt_call(void)
477{
478 int cpu = smp_processor_id();
479
480 /*
481 * the NMI deadlock-detector uses this.
482 */
483 per_cpu(irq_stat, cpu).apic_timer_irqs++;
484
485 smp_local_timer_interrupt();
486}
487#endif
488
489void smp_send_timer_broadcast_ipi(void)
490{
491 cpumask_t mask;
492
493 cpus_and(mask, cpu_online_map, timer_bcast_ipi);
494 if (!cpus_empty(mask)) {
495#ifdef CONFIG_SMP
496 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
497#else
498 /*
499 * We can directly call the apic timer interrupt handler
500 * in UP case. Minus all irq related functions
501 */
502 up_apic_timer_interrupt_call();
503#endif
504 }
505} 498}
506 499
507int setup_profiling_timer(unsigned int multiplier) 500int setup_profiling_timer(unsigned int multiplier)
@@ -914,6 +907,11 @@ void __devinit setup_local_APIC(void)
914 printk(KERN_INFO "No ESR for 82489DX.\n"); 907 printk(KERN_INFO "No ESR for 82489DX.\n");
915 } 908 }
916 909
910 /* Disable the local apic timer */
911 value = apic_read(APIC_LVTT);
912 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
913 apic_write_around(APIC_LVTT, value);
914
917 setup_apic_nmi_watchdog(NULL); 915 setup_apic_nmi_watchdog(NULL);
918 apic_pm_activate(); 916 apic_pm_activate();
919} 917}
@@ -1128,6 +1126,13 @@ static int __init parse_nolapic(char *arg)
1128} 1126}
1129early_param("nolapic", parse_nolapic); 1127early_param("nolapic", parse_nolapic);
1130 1128
1129static int __init apic_enable_lapic_timer(char *str)
1130{
1131 enable_local_apic_timer = 1;
1132 return 0;
1133}
1134early_param("lapictimer", apic_enable_lapic_timer);
1135
1131static int __init apic_set_verbosity(char *str) 1136static int __init apic_set_verbosity(char *str)
1132{ 1137{
1133 if (strcmp("debug", str) == 0) 1138 if (strcmp("debug", str) == 0)
@@ -1147,7 +1152,7 @@ __setup("apic=", apic_set_verbosity);
1147/* 1152/*
1148 * This interrupt should _never_ happen with our APIC/SMP architecture 1153 * This interrupt should _never_ happen with our APIC/SMP architecture
1149 */ 1154 */
1150fastcall void smp_spurious_interrupt(struct pt_regs *regs) 1155void smp_spurious_interrupt(struct pt_regs *regs)
1151{ 1156{
1152 unsigned long v; 1157 unsigned long v;
1153 1158
@@ -1171,7 +1176,7 @@ fastcall void smp_spurious_interrupt(struct pt_regs *regs)
1171/* 1176/*
1172 * This interrupt should never happen with our APIC/SMP architecture 1177 * This interrupt should never happen with our APIC/SMP architecture
1173 */ 1178 */
1174fastcall void smp_error_interrupt(struct pt_regs *regs) 1179void smp_error_interrupt(struct pt_regs *regs)
1175{ 1180{
1176 unsigned long v, v1; 1181 unsigned long v, v1;
1177 1182
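
The apic.c calibration hunk above turns the measured tick count into a fixed-point mult/shift pair with div_sc() and then derives min_delta_ns/max_delta_ns with clockevent_delta2ns(). The stand-alone user-space sketch below reproduces roughly the same arithmetic; the 14.318 MHz input frequency and the helper names are illustrative assumptions, not values taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* roughly what div_sc(freq, NSEC_PER_SEC, 32) evaluates to */
static uint32_t calc_mult(uint64_t freq, unsigned int shift)
{
	return (uint32_t)((freq << shift) / NSEC_PER_SEC);
}

/* roughly what clockevent_delta2ns() does: device ticks -> nanoseconds */
static uint64_t delta2ns(uint64_t ticks, uint32_t mult, unsigned int shift)
{
	return (ticks << shift) / mult;
}

/* the inverse, used when programming a one-shot event: nanoseconds -> ticks */
static uint64_t ns2delta(uint64_t ns, uint32_t mult, unsigned int shift)
{
	return (ns * mult) >> shift;
}

int main(void)
{
	uint64_t freq = 14318180;	/* assumed input clock, Hz */
	uint32_t mult = calc_mult(freq, 32);

	printf("mult         : %u\n", mult);
	printf("max_delta_ns : %llu\n",
	       (unsigned long long)delta2ns(0x7FFFFFFF, mult, 32));
	printf("1 ms in ticks: %llu\n",
	       (unsigned long long)ns2delta(1000000, mult, 32));
	return 0;
}

Precomputing mult/shift per device keeps the hot reprogramming path to a multiply and a shift instead of a 64-bit division.
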
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 7d2739fff3a3..e1006b7acc9e 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -1,4 +1,5 @@
1#include <linux/clocksource.h> 1#include <linux/clocksource.h>
2#include <linux/clockchips.h>
2#include <linux/errno.h> 3#include <linux/errno.h>
3#include <linux/hpet.h> 4#include <linux/hpet.h>
4#include <linux/init.h> 5#include <linux/init.h>
@@ -6,17 +7,278 @@
6#include <asm/hpet.h> 7#include <asm/hpet.h>
7#include <asm/io.h> 8#include <asm/io.h>
8 9
10extern struct clock_event_device *global_clock_event;
11
9#define HPET_MASK CLOCKSOURCE_MASK(32) 12#define HPET_MASK CLOCKSOURCE_MASK(32)
10#define HPET_SHIFT 22 13#define HPET_SHIFT 22
11 14
12/* FSEC = 10^-15 NSEC = 10^-9 */ 15/* FSEC = 10^-15 NSEC = 10^-9 */
13#define FSEC_PER_NSEC 1000000 16#define FSEC_PER_NSEC 1000000
14 17
15static void __iomem *hpet_ptr; 18/*
19 * HPET address is set in acpi/boot.c, when an ACPI entry exists
20 */
21unsigned long hpet_address;
22static void __iomem * hpet_virt_address;
23
24static inline unsigned long hpet_readl(unsigned long a)
25{
26 return readl(hpet_virt_address + a);
27}
28
29static inline void hpet_writel(unsigned long d, unsigned long a)
30{
31 writel(d, hpet_virt_address + a);
32}
33
34/*
35 * HPET command line enable / disable
36 */
37static int boot_hpet_disable;
38
39static int __init hpet_setup(char* str)
40{
41 if (str) {
42 if (!strncmp("disable", str, 7))
43 boot_hpet_disable = 1;
44 }
45 return 1;
46}
47__setup("hpet=", hpet_setup);
48
49static inline int is_hpet_capable(void)
50{
51 return (!boot_hpet_disable && hpet_address);
52}
53
54/*
55 * HPET timer interrupt enable / disable
56 */
57static int hpet_legacy_int_enabled;
58
59/**
60 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
61 */
62int is_hpet_enabled(void)
63{
64 return is_hpet_capable() && hpet_legacy_int_enabled;
65}
66
67/*
68 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
69 * timer 0 and timer 1 in case of RTC emulation.
70 */
71#ifdef CONFIG_HPET
72static void hpet_reserve_platform_timers(unsigned long id)
73{
74 struct hpet __iomem *hpet = hpet_virt_address;
75 struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
76 unsigned int nrtimers, i;
77 struct hpet_data hd;
78
79 nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
80
81 memset(&hd, 0, sizeof (hd));
82 hd.hd_phys_address = hpet_address;
83 hd.hd_address = hpet_virt_address;
84 hd.hd_nirqs = nrtimers;
85 hd.hd_flags = HPET_DATA_PLATFORM;
86 hpet_reserve_timer(&hd, 0);
87
88#ifdef CONFIG_HPET_EMULATE_RTC
89 hpet_reserve_timer(&hd, 1);
90#endif
91
92 hd.hd_irq[0] = HPET_LEGACY_8254;
93 hd.hd_irq[1] = HPET_LEGACY_RTC;
94
95 for (i = 2; i < nrtimers; timer++, i++)
96 hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
97 Tn_INT_ROUTE_CNF_SHIFT;
98
99 hpet_alloc(&hd);
100
101}
102#else
103static void hpet_reserve_platform_timers(unsigned long id) { }
104#endif
105
106/*
107 * Common hpet info
108 */
109static unsigned long hpet_period;
110
111static void hpet_set_mode(enum clock_event_mode mode,
112 struct clock_event_device *evt);
113static int hpet_next_event(unsigned long delta,
114 struct clock_event_device *evt);
115
116/*
117 * The hpet clock event device
118 */
119static struct clock_event_device hpet_clockevent = {
120 .name = "hpet",
121 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
122 .set_mode = hpet_set_mode,
123 .set_next_event = hpet_next_event,
124 .shift = 32,
125 .irq = 0,
126};
127
128static void hpet_start_counter(void)
129{
130 unsigned long cfg = hpet_readl(HPET_CFG);
131
132 cfg &= ~HPET_CFG_ENABLE;
133 hpet_writel(cfg, HPET_CFG);
134 hpet_writel(0, HPET_COUNTER);
135 hpet_writel(0, HPET_COUNTER + 4);
136 cfg |= HPET_CFG_ENABLE;
137 hpet_writel(cfg, HPET_CFG);
138}
139
140static void hpet_enable_int(void)
141{
142 unsigned long cfg = hpet_readl(HPET_CFG);
143
144 cfg |= HPET_CFG_LEGACY;
145 hpet_writel(cfg, HPET_CFG);
146 hpet_legacy_int_enabled = 1;
147}
148
149static void hpet_set_mode(enum clock_event_mode mode,
150 struct clock_event_device *evt)
151{
152 unsigned long cfg, cmp, now;
153 uint64_t delta;
154
155 switch(mode) {
156 case CLOCK_EVT_MODE_PERIODIC:
157 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
158 delta >>= hpet_clockevent.shift;
159 now = hpet_readl(HPET_COUNTER);
160 cmp = now + (unsigned long) delta;
161 cfg = hpet_readl(HPET_T0_CFG);
162 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
163 HPET_TN_SETVAL | HPET_TN_32BIT;
164 hpet_writel(cfg, HPET_T0_CFG);
165 /*
166 * The first write after writing TN_SETVAL to the
167 * config register sets the counter value, the second
168 * write sets the period.
169 */
170 hpet_writel(cmp, HPET_T0_CMP);
171 udelay(1);
172 hpet_writel((unsigned long) delta, HPET_T0_CMP);
173 break;
174
175 case CLOCK_EVT_MODE_ONESHOT:
176 cfg = hpet_readl(HPET_T0_CFG);
177 cfg &= ~HPET_TN_PERIODIC;
178 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
179 hpet_writel(cfg, HPET_T0_CFG);
180 break;
181
182 case CLOCK_EVT_MODE_UNUSED:
183 case CLOCK_EVT_MODE_SHUTDOWN:
184 cfg = hpet_readl(HPET_T0_CFG);
185 cfg &= ~HPET_TN_ENABLE;
186 hpet_writel(cfg, HPET_T0_CFG);
187 break;
188 }
189}
190
191static int hpet_next_event(unsigned long delta,
192 struct clock_event_device *evt)
193{
194 unsigned long cnt;
195
196 cnt = hpet_readl(HPET_COUNTER);
197 cnt += delta;
198 hpet_writel(cnt, HPET_T0_CMP);
199
200 return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
201}
202
203/*
204 * Try to setup the HPET timer
205 */
206int __init hpet_enable(void)
207{
208 unsigned long id;
209 uint64_t hpet_freq;
210
211 if (!is_hpet_capable())
212 return 0;
213
214 hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
215
216 /*
217 * Read the period and check for a sane value:
218 */
219 hpet_period = hpet_readl(HPET_PERIOD);
220 if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
221 goto out_nohpet;
222
223 /*
224 * The period is a femto seconds value. We need to calculate the
225 * scaled math multiplication factor for nanosecond to hpet tick
226 * conversion.
227 */
228 hpet_freq = 1000000000000000ULL;
229 do_div(hpet_freq, hpet_period);
230 hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
231 NSEC_PER_SEC, 32);
232 /* Calculate the min / max delta */
233 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
234 &hpet_clockevent);
235 hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
236 &hpet_clockevent);
237
238 /*
239 * Read the HPET ID register to retrieve the IRQ routing
240 * information and the number of channels
241 */
242 id = hpet_readl(HPET_ID);
243
244#ifdef CONFIG_HPET_EMULATE_RTC
245 /*
246 * The legacy routing mode needs at least two channels, tick timer
247 * and the rtc emulation channel.
248 */
249 if (!(id & HPET_ID_NUMBER))
250 goto out_nohpet;
251#endif
252
253 /* Start the counter */
254 hpet_start_counter();
255
256 if (id & HPET_ID_LEGSUP) {
257 hpet_enable_int();
258 hpet_reserve_platform_timers(id);
259 /*
260 * Start hpet with the boot cpu mask and make it
261 * global after the IO_APIC has been initialized.
262 */
263 hpet_clockevent.cpumask =cpumask_of_cpu(0);
264 clockevents_register_device(&hpet_clockevent);
265 global_clock_event = &hpet_clockevent;
266 return 1;
267 }
268 return 0;
16 269
270out_nohpet:
271 iounmap(hpet_virt_address);
272 hpet_virt_address = NULL;
273 return 0;
274}
275
276/*
277 * Clock source related code
278 */
17static cycle_t read_hpet(void) 279static cycle_t read_hpet(void)
18{ 280{
19 return (cycle_t)readl(hpet_ptr); 281 return (cycle_t)hpet_readl(HPET_COUNTER);
20} 282}
21 283
22static struct clocksource clocksource_hpet = { 284static struct clocksource clocksource_hpet = {
@@ -24,28 +286,17 @@ static struct clocksource clocksource_hpet = {
24 .rating = 250, 286 .rating = 250,
25 .read = read_hpet, 287 .read = read_hpet,
26 .mask = HPET_MASK, 288 .mask = HPET_MASK,
27 .mult = 0, /* set below */
28 .shift = HPET_SHIFT, 289 .shift = HPET_SHIFT,
29 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 290 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
30}; 291};
31 292
32static int __init init_hpet_clocksource(void) 293static int __init init_hpet_clocksource(void)
33{ 294{
34 unsigned long hpet_period;
35 void __iomem* hpet_base;
36 u64 tmp; 295 u64 tmp;
37 int err;
38 296
39 if (!is_hpet_enabled()) 297 if (!hpet_virt_address)
40 return -ENODEV; 298 return -ENODEV;
41 299
42 /* calculate the hpet address: */
43 hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
44 hpet_ptr = hpet_base + HPET_COUNTER;
45
46 /* calculate the frequency: */
47 hpet_period = readl(hpet_base + HPET_PERIOD);
48
49 /* 300 /*
50 * hpet period is in femto seconds per cycle 301 * hpet period is in femto seconds per cycle
51 * so we need to convert this to ns/cyc units 302 * so we need to convert this to ns/cyc units
@@ -61,11 +312,218 @@ static int __init init_hpet_clocksource(void)
61 do_div(tmp, FSEC_PER_NSEC); 312 do_div(tmp, FSEC_PER_NSEC);
62 clocksource_hpet.mult = (u32)tmp; 313 clocksource_hpet.mult = (u32)tmp;
63 314
64 err = clocksource_register(&clocksource_hpet); 315 return clocksource_register(&clocksource_hpet);
65 if (err)
66 iounmap(hpet_base);
67
68 return err;
69} 316}
70 317
71module_init(init_hpet_clocksource); 318module_init(init_hpet_clocksource);
319
320#ifdef CONFIG_HPET_EMULATE_RTC
321
322/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
323 * is enabled, we support RTC interrupt functionality in software.
324 * RTC has 3 kinds of interrupts:
325 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
326 * is updated
327 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
328 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
329 * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
330 * (1) and (2) above are implemented using polling at a frequency of
331 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
332 * overhead. (DEFAULT_RTC_INT_FREQ)
333 * For (3), we use interrupts at 64Hz or user specified periodic
334 * frequency, whichever is higher.
335 */
336#include <linux/mc146818rtc.h>
337#include <linux/rtc.h>
338
339#define DEFAULT_RTC_INT_FREQ 64
340#define DEFAULT_RTC_SHIFT 6
341#define RTC_NUM_INTS 1
342
343static unsigned long hpet_rtc_flags;
344static unsigned long hpet_prev_update_sec;
345static struct rtc_time hpet_alarm_time;
346static unsigned long hpet_pie_count;
347static unsigned long hpet_t1_cmp;
348static unsigned long hpet_default_delta;
349static unsigned long hpet_pie_delta;
350static unsigned long hpet_pie_limit;
351
352/*
353 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
354 * is not supported by all HPET implementations for timer 1.
355 *
356 * hpet_rtc_timer_init() is called when the rtc is initialized.
357 */
358int hpet_rtc_timer_init(void)
359{
360 unsigned long cfg, cnt, delta, flags;
361
362 if (!is_hpet_enabled())
363 return 0;
364
365 if (!hpet_default_delta) {
366 uint64_t clc;
367
368 clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
369 clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
370 hpet_default_delta = (unsigned long) clc;
371 }
372
373 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
374 delta = hpet_default_delta;
375 else
376 delta = hpet_pie_delta;
377
378 local_irq_save(flags);
379
380 cnt = delta + hpet_readl(HPET_COUNTER);
381 hpet_writel(cnt, HPET_T1_CMP);
382 hpet_t1_cmp = cnt;
383
384 cfg = hpet_readl(HPET_T1_CFG);
385 cfg &= ~HPET_TN_PERIODIC;
386 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
387 hpet_writel(cfg, HPET_T1_CFG);
388
389 local_irq_restore(flags);
390
391 return 1;
392}
393
394/*
395 * The functions below are called from rtc driver.
396 * Return 0 if HPET is not being used.
397 * Otherwise do the necessary changes and return 1.
398 */
399int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
400{
401 if (!is_hpet_enabled())
402 return 0;
403
404 hpet_rtc_flags &= ~bit_mask;
405 return 1;
406}
407
408int hpet_set_rtc_irq_bit(unsigned long bit_mask)
409{
410 unsigned long oldbits = hpet_rtc_flags;
411
412 if (!is_hpet_enabled())
413 return 0;
414
415 hpet_rtc_flags |= bit_mask;
416
417 if (!oldbits)
418 hpet_rtc_timer_init();
419
420 return 1;
421}
422
423int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
424 unsigned char sec)
425{
426 if (!is_hpet_enabled())
427 return 0;
428
429 hpet_alarm_time.tm_hour = hrs;
430 hpet_alarm_time.tm_min = min;
431 hpet_alarm_time.tm_sec = sec;
432
433 return 1;
434}
435
436int hpet_set_periodic_freq(unsigned long freq)
437{
438 uint64_t clc;
439
440 if (!is_hpet_enabled())
441 return 0;
442
443 if (freq <= DEFAULT_RTC_INT_FREQ)
444 hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
445 else {
446 clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
447 do_div(clc, freq);
448 clc >>= hpet_clockevent.shift;
449 hpet_pie_delta = (unsigned long) clc;
450 }
451 return 1;
452}
453
454int hpet_rtc_dropped_irq(void)
455{
456 return is_hpet_enabled();
457}
458
459static void hpet_rtc_timer_reinit(void)
460{
461 unsigned long cfg, delta;
462 int lost_ints = -1;
463
464 if (unlikely(!hpet_rtc_flags)) {
465 cfg = hpet_readl(HPET_T1_CFG);
466 cfg &= ~HPET_TN_ENABLE;
467 hpet_writel(cfg, HPET_T1_CFG);
468 return;
469 }
470
471 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
472 delta = hpet_default_delta;
473 else
474 delta = hpet_pie_delta;
475
476 /*
477 * Increment the comparator value until we are ahead of the
478 * current count.
479 */
480 do {
481 hpet_t1_cmp += delta;
482 hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
483 lost_ints++;
484 } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
485
486 if (lost_ints) {
487 if (hpet_rtc_flags & RTC_PIE)
488 hpet_pie_count += lost_ints;
489 if (printk_ratelimit())
490 printk(KERN_WARNING "rtc: lost %d interrupts\n",
491 lost_ints);
492 }
493}
494
495irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
496{
497 struct rtc_time curr_time;
498 unsigned long rtc_int_flag = 0;
499
500 hpet_rtc_timer_reinit();
501
502 if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
503 rtc_get_rtc_time(&curr_time);
504
505 if (hpet_rtc_flags & RTC_UIE &&
506 curr_time.tm_sec != hpet_prev_update_sec) {
507 rtc_int_flag = RTC_UF;
508 hpet_prev_update_sec = curr_time.tm_sec;
509 }
510
511 if (hpet_rtc_flags & RTC_PIE &&
512 ++hpet_pie_count >= hpet_pie_limit) {
513 rtc_int_flag |= RTC_PF;
514 hpet_pie_count = 0;
515 }
516
517 if (hpet_rtc_flags & RTC_PIE &&
518 (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
519 (curr_time.tm_min == hpet_alarm_time.tm_min) &&
520 (curr_time.tm_hour == hpet_alarm_time.tm_hour))
521 rtc_int_flag |= RTC_AF;
522
523 if (rtc_int_flag) {
524 rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
525 rtc_interrupt(rtc_int_flag, dev_id);
526 }
527 return IRQ_HANDLED;
528}
529#endif
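
hpet_next_event() and hpet_rtc_timer_reinit() above both decide whether the freshly programmed comparator already lies behind the running counter by looking at the sign of an unsigned difference. The small user-space sketch below (counter values invented for illustration) shows why that test stays correct even across a 32-bit counter wrap.

#include <stdint.h>
#include <stdio.h>

/* nonzero when 'counter' has already run past 'cmp', modulo 2^32 */
static int deadline_missed(uint32_t counter, uint32_t cmp)
{
	return (int32_t)(counter - cmp) > 0;
}

int main(void)
{
	printf("%d\n", deadline_missed(0x00001000, 0x00002000)); /* 0: cmp still ahead */
	printf("%d\n", deadline_missed(0x00003000, 0x00002000)); /* 1: cmp missed */
	printf("%d\n", deadline_missed(0xFFFFFF00, 0x00000100)); /* 0: cmp ahead, across wrap */
	printf("%d\n", deadline_missed(0x00000200, 0xFFFFFF00)); /* 1: counter wrapped past cmp */
	return 0;
}

The same idiom lets hpet_rtc_timer_reinit() step the comparator forward until it is ahead of the counter again and report the skipped intervals as lost interrupts.
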
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 9a0060b92e32..a6bc7bb38834 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -2,7 +2,7 @@
2 * i8253.c 8253/PIT functions 2 * i8253.c 8253/PIT functions
3 * 3 *
4 */ 4 */
5#include <linux/clocksource.h> 5#include <linux/clockchips.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/jiffies.h> 7#include <linux/jiffies.h>
8#include <linux/sysdev.h> 8#include <linux/sysdev.h>
@@ -19,17 +19,97 @@
19DEFINE_SPINLOCK(i8253_lock); 19DEFINE_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock); 20EXPORT_SYMBOL(i8253_lock);
21 21
22void setup_pit_timer(void) 22/*
23 * HPET replaces the PIT, when enabled. So we need to know, which of
24 * the two timers is used
25 */
26struct clock_event_device *global_clock_event;
27
28/*
29 * Initialize the PIT timer.
30 *
31 * This is also called after resume to bring the PIT into operation again.
32 */
33static void init_pit_timer(enum clock_event_mode mode,
34 struct clock_event_device *evt)
35{
36 unsigned long flags;
37
38 spin_lock_irqsave(&i8253_lock, flags);
39
40 switch(mode) {
41 case CLOCK_EVT_MODE_PERIODIC:
42 /* binary, mode 2, LSB/MSB, ch 0 */
43 outb_p(0x34, PIT_MODE);
44 udelay(10);
45 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
46 udelay(10);
47 outb(LATCH >> 8 , PIT_CH0); /* MSB */
48 break;
49
50 case CLOCK_EVT_MODE_ONESHOT:
51 case CLOCK_EVT_MODE_SHUTDOWN:
52 case CLOCK_EVT_MODE_UNUSED:
53 /* One shot setup */
54 outb_p(0x38, PIT_MODE);
55 udelay(10);
56 break;
57 }
58 spin_unlock_irqrestore(&i8253_lock, flags);
59}
60
61/*
62 * Program the next event in oneshot mode
63 *
64 * Delta is given in PIT ticks
65 */
66static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
23{ 67{
24 unsigned long flags; 68 unsigned long flags;
25 69
26 spin_lock_irqsave(&i8253_lock, flags); 70 spin_lock_irqsave(&i8253_lock, flags);
27 outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ 71 outb_p(delta & 0xff , PIT_CH0); /* LSB */
28 udelay(10); 72 outb(delta >> 8 , PIT_CH0); /* MSB */
29 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
30 udelay(10);
31 outb(LATCH >> 8 , PIT_CH0); /* MSB */
32 spin_unlock_irqrestore(&i8253_lock, flags); 73 spin_unlock_irqrestore(&i8253_lock, flags);
74
75 return 0;
76}
77
78/*
79 * On UP the PIT can serve all of the possible timer functions. On SMP systems
80 * it can be solely used for the global tick.
81 *
82 * The profiling and update capabilites are switched off once the local apic is
83 * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
84 * !using_apic_timer decisions in do_timer_interrupt_hook()
85 */
86struct clock_event_device pit_clockevent = {
87 .name = "pit",
88 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
89 .set_mode = init_pit_timer,
90 .set_next_event = pit_next_event,
91 .shift = 32,
92 .irq = 0,
93};
94
95/*
96 * Initialize the conversion factor and the min/max deltas of the clock event
97 * structure and register the clock event source with the framework.
98 */
99void __init setup_pit_timer(void)
100{
101 /*
102 * Start pit with the boot cpu mask and make it global after the
103 * IO_APIC has been initialized.
104 */
105 pit_clockevent.cpumask = cpumask_of_cpu(0);
106 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
107 pit_clockevent.max_delta_ns =
108 clockevent_delta2ns(0x7FFF, &pit_clockevent);
109 pit_clockevent.min_delta_ns =
110 clockevent_delta2ns(0xF, &pit_clockevent);
111 clockevents_register_device(&pit_clockevent);
112 global_clock_event = &pit_clockevent;
33} 113}
34 114
35/* 115/*
@@ -46,7 +126,7 @@ static cycle_t pit_read(void)
46 static u32 old_jifs; 126 static u32 old_jifs;
47 127
48 spin_lock_irqsave(&i8253_lock, flags); 128 spin_lock_irqsave(&i8253_lock, flags);
49 /* 129 /*
50 * Although our caller may have the read side of xtime_lock, 130 * Although our caller may have the read side of xtime_lock,
51 * this is now a seqlock, and we are cheating in this routine 131 * this is now a seqlock, and we are cheating in this routine
52 * by having side effects on state that we cannot undo if 132 * by having side effects on state that we cannot undo if
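
setup_pit_timer() above reloads LATCH ticks in periodic mode and caps one-shot deltas at 0x7FFF ticks (the PIT counter itself is only 16 bits wide). As a rough aside, assuming the usual 1,193,182 Hz PIT input clock and HZ=1000 (neither number is spelled out in this patch), those limits work out as in the sketch below.

#include <stdio.h>

#define PIT_TICK_RATE	1193182UL		/* assumed i8253 input clock, Hz */
#define HZ		1000UL			/* assumed kernel tick rate */
#define LATCH		((PIT_TICK_RATE + HZ / 2) / HZ)

int main(void)
{
	/* reload value programmed in CLOCK_EVT_MODE_PERIODIC */
	printf("LATCH        : %lu ticks\n", LATCH);

	/* longest one-shot interval, matching the 0x7FFF used for max_delta_ns */
	printf("max one-shot : %.3f ms\n", 0x7FFF * 1000.0 / PIT_TICK_RATE);

	/* shortest one-shot interval, matching the 0xF used for min_delta_ns */
	printf("min one-shot : %.3f us\n", 0xF * 1000000.0 / PIT_TICK_RATE);
	return 0;
}

So even in oneshot mode the PIT cannot defer an event much beyond roughly 27 ms with these assumptions.
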
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index c8d45821c788..255b1af9a054 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -410,12 +410,6 @@ void __init native_init_IRQ(void)
410 intr_init_hook(); 410 intr_init_hook();
411 411
412 /* 412 /*
413 * Set the clock to HZ Hz, we already have a valid
414 * vector now:
415 */
416 setup_pit_timer();
417
418 /*
419 * External FPU? Set up irq13 if so, for 413 * External FPU? Set up irq13 if so, for
420 * original braindamaged IBM FERR coupling. 414 * original braindamaged IBM FERR coupling.
421 */ 415 */
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 6cdd941fc2f2..48bfcaa13ecc 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -287,9 +287,7 @@ static void __cpuinit smp_callin(void)
287 /* 287 /*
288 * Save our processor parameters 288 * Save our processor parameters
289 */ 289 */
290 smp_store_cpu_info(cpuid); 290 smp_store_cpu_info(cpuid);
291
292 disable_APIC_timer();
293 291
294 /* 292 /*
295 * Allow the master to continue. 293 * Allow the master to continue.
@@ -408,7 +406,6 @@ static void __cpuinit start_secondary(void *unused)
408 enable_NMI_through_LVT0(NULL); 406 enable_NMI_through_LVT0(NULL);
409 enable_8259A_irq(0); 407 enable_8259A_irq(0);
410 } 408 }
411 enable_APIC_timer();
412 /* 409 /*
413 * low-memory mappings have been cleared, flush them from 410 * low-memory mappings have been cleared, flush them from
414 * the local TLBs too. 411 * the local TLBs too.
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 044c17572eef..a5350059557a 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -159,15 +159,6 @@ EXPORT_SYMBOL(profile_pc);
159 */ 159 */
160irqreturn_t timer_interrupt(int irq, void *dev_id) 160irqreturn_t timer_interrupt(int irq, void *dev_id)
161{ 161{
162 /*
163 * Here we are in the timer irq handler. We just have irqs locally
164 * disabled but we don't know if the timer_bh is running on the other
165 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
166 * the irq version of write_lock because as just said we have irq
167 * locally disabled. -arca
168 */
169 write_seqlock(&xtime_lock);
170
171#ifdef CONFIG_X86_IO_APIC 162#ifdef CONFIG_X86_IO_APIC
172 if (timer_ack) { 163 if (timer_ack) {
173 /* 164 /*
@@ -186,7 +177,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
186 177
187 do_timer_interrupt_hook(); 178 do_timer_interrupt_hook();
188 179
189
190 if (MCA_bus) { 180 if (MCA_bus) {
191 /* The PS/2 uses level-triggered interrupts. You can't 181 /* The PS/2 uses level-triggered interrupts. You can't
192 turn them off, nor would you want to (any attempt to 182 turn them off, nor would you want to (any attempt to
@@ -201,13 +191,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
201 outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */ 191 outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */
202 } 192 }
203 193
204 write_sequnlock(&xtime_lock);
205
206#ifdef CONFIG_X86_LOCAL_APIC
207 if (using_apic_timer)
208 smp_send_timer_broadcast_ipi();
209#endif
210
211 return IRQ_HANDLED; 194 return IRQ_HANDLED;
212} 195}
213 196
@@ -277,63 +260,16 @@ void notify_arch_cmos_timer(void)
277 mod_timer(&sync_cmos_timer, jiffies + 1); 260 mod_timer(&sync_cmos_timer, jiffies + 1);
278} 261}
279 262
280static int timer_resume(struct sys_device *dev)
281{
282#ifdef CONFIG_HPET_TIMER
283 if (is_hpet_enabled())
284 hpet_reenable();
285#endif
286 setup_pit_timer();
287 touch_softlockup_watchdog();
288 return 0;
289}
290
291static struct sysdev_class timer_sysclass = {
292 .resume = timer_resume,
293 set_kset_name("timer"),
294};
295
296
297/* XXX this driverfs stuff should probably go elsewhere later -john */
298static struct sys_device device_timer = {
299 .id = 0,
300 .cls = &timer_sysclass,
301};
302
303static int time_init_device(void)
304{
305 int error = sysdev_class_register(&timer_sysclass);
306 if (!error)
307 error = sysdev_register(&device_timer);
308 return error;
309}
310
311device_initcall(time_init_device);
312
313#ifdef CONFIG_HPET_TIMER
314extern void (*late_time_init)(void); 263extern void (*late_time_init)(void);
315/* Duplicate of time_init() below, with hpet_enable part added */ 264/* Duplicate of time_init() below, with hpet_enable part added */
316static void __init hpet_time_init(void) 265static void __init hpet_time_init(void)
317{ 266{
318 if ((hpet_enable() >= 0) && hpet_use_timer) { 267 if (!hpet_enable())
319 printk("Using HPET for base-timer\n"); 268 setup_pit_timer();
320 }
321
322 do_time_init(); 269 do_time_init();
323} 270}
324#endif
325 271
326void __init time_init(void) 272void __init time_init(void)
327{ 273{
328#ifdef CONFIG_HPET_TIMER 274 late_time_init = hpet_time_init;
329 if (is_hpet_capable()) {
330 /*
331 * HPET initialization needs to do memory-mapped io. So, let
332 * us do a late initialization after mem_init().
333 */
334 late_time_init = hpet_time_init;
335 return;
336 }
337#endif
338 do_time_init();
339} 275}
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
deleted file mode 100644
index 1e4702dfcd01..000000000000
--- a/arch/i386/kernel/time_hpet.c
+++ /dev/null
@@ -1,497 +0,0 @@
1/*
2 * linux/arch/i386/kernel/time_hpet.c
3 * This code largely copied from arch/x86_64/kernel/time.c
4 * See that file for credits.
5 *
6 * 2003-06-30 Venkatesh Pallipadi - Additional changes for HPET support
7 */
8
9#include <linux/errno.h>
10#include <linux/kernel.h>
11#include <linux/param.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <linux/smp.h>
15
16#include <asm/timer.h>
17#include <asm/fixmap.h>
18#include <asm/apic.h>
19
20#include <linux/timex.h>
21
22#include <asm/hpet.h>
23#include <linux/hpet.h>
24
25static unsigned long hpet_period; /* fsecs / HPET clock */
26unsigned long hpet_tick; /* hpet clks count per tick */
27unsigned long hpet_address; /* hpet memory map physical address */
28int hpet_use_timer;
29
30static int use_hpet; /* can be used for runtime check of hpet */
31static int boot_hpet_disable; /* boottime override for HPET timer */
32static void __iomem * hpet_virt_address; /* hpet kernel virtual address */
33
34#define FSEC_TO_USEC (1000000000UL)
35
36int hpet_readl(unsigned long a)
37{
38 return readl(hpet_virt_address + a);
39}
40
41static void hpet_writel(unsigned long d, unsigned long a)
42{
43 writel(d, hpet_virt_address + a);
44}
45
46#ifdef CONFIG_X86_LOCAL_APIC
47/*
48 * HPET counters dont wrap around on every tick. They just change the
49 * comparator value and continue. Next tick can be caught by checking
50 * for a change in the comparator value. Used in apic.c.
51 */
52static void __devinit wait_hpet_tick(void)
53{
54 unsigned int start_cmp_val, end_cmp_val;
55
56 start_cmp_val = hpet_readl(HPET_T0_CMP);
57 do {
58 end_cmp_val = hpet_readl(HPET_T0_CMP);
59 } while (start_cmp_val == end_cmp_val);
60}
61#endif
62
63static int hpet_timer_stop_set_go(unsigned long tick)
64{
65 unsigned int cfg;
66
67 /*
68 * Stop the timers and reset the main counter.
69 */
70 cfg = hpet_readl(HPET_CFG);
71 cfg &= ~HPET_CFG_ENABLE;
72 hpet_writel(cfg, HPET_CFG);
73 hpet_writel(0, HPET_COUNTER);
74 hpet_writel(0, HPET_COUNTER + 4);
75
76 if (hpet_use_timer) {
77 /*
78 * Set up timer 0, as periodic with first interrupt to happen at
79 * hpet_tick, and period also hpet_tick.
80 */
81 cfg = hpet_readl(HPET_T0_CFG);
82 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
83 HPET_TN_SETVAL | HPET_TN_32BIT;
84 hpet_writel(cfg, HPET_T0_CFG);
85
86 /*
87 * The first write after writing TN_SETVAL to the config register sets
88 * the counter value, the second write sets the threshold.
89 */
90 hpet_writel(tick, HPET_T0_CMP);
91 hpet_writel(tick, HPET_T0_CMP);
92 }
93 /*
94 * Go!
95 */
96 cfg = hpet_readl(HPET_CFG);
97 if (hpet_use_timer)
98 cfg |= HPET_CFG_LEGACY;
99 cfg |= HPET_CFG_ENABLE;
100 hpet_writel(cfg, HPET_CFG);
101
102 return 0;
103}
104
105/*
106 * Check whether HPET was found by ACPI boot parse. If yes setup HPET
107 * counter 0 for kernel base timer.
108 */
109int __init hpet_enable(void)
110{
111 unsigned int id;
112 unsigned long tick_fsec_low, tick_fsec_high; /* tick in femto sec */
113 unsigned long hpet_tick_rem;
114
115 if (boot_hpet_disable)
116 return -1;
117
118 if (!hpet_address) {
119 return -1;
120 }
121 hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
122 /*
123 * Read the period, compute tick and quotient.
124 */
125 id = hpet_readl(HPET_ID);
126
127 /*
128 * We are checking for value '1' or more in number field if
129 * CONFIG_HPET_EMULATE_RTC is set because we will need an
130 * additional timer for RTC emulation.
131 * However, we can do with one timer otherwise using the
132 * the single HPET timer for system time.
133 */
134#ifdef CONFIG_HPET_EMULATE_RTC
135 if (!(id & HPET_ID_NUMBER)) {
136 iounmap(hpet_virt_address);
137 hpet_virt_address = NULL;
138 return -1;
139 }
140#endif
141
142
143 hpet_period = hpet_readl(HPET_PERIOD);
144 if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) {
145 iounmap(hpet_virt_address);
146 hpet_virt_address = NULL;
147 return -1;
148 }
149
150 /*
151 * 64 bit math
152 * First changing tick into fsec
153 * Then 64 bit div to find number of hpet clk per tick
154 */
155 ASM_MUL64_REG(tick_fsec_low, tick_fsec_high,
156 KERNEL_TICK_USEC, FSEC_TO_USEC);
157 ASM_DIV64_REG(hpet_tick, hpet_tick_rem,
158 hpet_period, tick_fsec_low, tick_fsec_high);
159
160 if (hpet_tick_rem > (hpet_period >> 1))
161 hpet_tick++; /* rounding the result */
162
163 hpet_use_timer = id & HPET_ID_LEGSUP;
164
165 if (hpet_timer_stop_set_go(hpet_tick)) {
166 iounmap(hpet_virt_address);
167 hpet_virt_address = NULL;
168 return -1;
169 }
170
171 use_hpet = 1;
172
173#ifdef CONFIG_HPET
174 {
175 struct hpet_data hd;
176 unsigned int ntimer;
177
178 memset(&hd, 0, sizeof (hd));
179
180 ntimer = hpet_readl(HPET_ID);
181 ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
182 ntimer++;
183
184 /*
185 * Register with driver.
186 * Timer0 and Timer1 is used by platform.
187 */
188 hd.hd_phys_address = hpet_address;
189 hd.hd_address = hpet_virt_address;
190 hd.hd_nirqs = ntimer;
191 hd.hd_flags = HPET_DATA_PLATFORM;
192 hpet_reserve_timer(&hd, 0);
193#ifdef CONFIG_HPET_EMULATE_RTC
194 hpet_reserve_timer(&hd, 1);
195#endif
196 hd.hd_irq[0] = HPET_LEGACY_8254;
197 hd.hd_irq[1] = HPET_LEGACY_RTC;
198 if (ntimer > 2) {
199 struct hpet __iomem *hpet;
200 struct hpet_timer __iomem *timer;
201 int i;
202
203 hpet = hpet_virt_address;
204
205 for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
206 timer++, i++)
207 hd.hd_irq[i] = (timer->hpet_config &
208 Tn_INT_ROUTE_CNF_MASK) >>
209 Tn_INT_ROUTE_CNF_SHIFT;
210
211 }
212
213 hpet_alloc(&hd);
214 }
215#endif
216
217#ifdef CONFIG_X86_LOCAL_APIC
218 if (hpet_use_timer)
219 wait_timer_tick = wait_hpet_tick;
220#endif
221 return 0;
222}
223
224int hpet_reenable(void)
225{
226 return hpet_timer_stop_set_go(hpet_tick);
227}
228
229int is_hpet_enabled(void)
230{
231 return use_hpet;
232}
233
234int is_hpet_capable(void)
235{
236 if (!boot_hpet_disable && hpet_address)
237 return 1;
238 return 0;
239}
240
241static int __init hpet_setup(char* str)
242{
243 if (str) {
244 if (!strncmp("disable", str, 7))
245 boot_hpet_disable = 1;
246 }
247 return 1;
248}
249
250__setup("hpet=", hpet_setup);
251
252#ifdef CONFIG_HPET_EMULATE_RTC
253/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
254 * is enabled, we support RTC interrupt functionality in software.
255 * RTC has 3 kinds of interrupts:
256 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
257 * is updated
258 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
259 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
260 * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
261 * (1) and (2) above are implemented using polling at a frequency of
262 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
263 * overhead. (DEFAULT_RTC_INT_FREQ)
264 * For (3), we use interrupts at 64Hz or user specified periodic
265 * frequency, whichever is higher.
266 */
267#include <linux/mc146818rtc.h>
268#include <linux/rtc.h>
269
270#define DEFAULT_RTC_INT_FREQ 64
271#define RTC_NUM_INTS 1
272
273static unsigned long UIE_on;
274static unsigned long prev_update_sec;
275
276static unsigned long AIE_on;
277static struct rtc_time alarm_time;
278
279static unsigned long PIE_on;
280static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
281static unsigned long PIE_count;
282
283static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
284static unsigned int hpet_t1_cmp; /* cached comparator register */
285
286/*
287 * Timer 1 for RTC, we do not use periodic interrupt feature,
288 * even if HPET supports periodic interrupts on Timer 1.
289 * The reason being, to set up a periodic interrupt in HPET, we need to
290 * stop the main counter. And if we do that everytime someone diables/enables
291 * RTC, we will have adverse effect on main kernel timer running on Timer 0.
292 * So, for the time being, simulate the periodic interrupt in software.
293 *
294 * hpet_rtc_timer_init() is called for the first time and during subsequent
295 * interuppts reinit happens through hpet_rtc_timer_reinit().
296 */
297int hpet_rtc_timer_init(void)
298{
299 unsigned int cfg, cnt;
300 unsigned long flags;
301
302 if (!is_hpet_enabled())
303 return 0;
304 /*
305 * Set the counter 1 and enable the interrupts.
306 */
307 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
308 hpet_rtc_int_freq = PIE_freq;
309 else
310 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
311
312 local_irq_save(flags);
313
314 cnt = hpet_readl(HPET_COUNTER);
315 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
316 hpet_writel(cnt, HPET_T1_CMP);
317 hpet_t1_cmp = cnt;
318
319 cfg = hpet_readl(HPET_T1_CFG);
320 cfg &= ~HPET_TN_PERIODIC;
321 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
322 hpet_writel(cfg, HPET_T1_CFG);
323
324 local_irq_restore(flags);
325
326 return 1;
327}
328
329static void hpet_rtc_timer_reinit(void)
330{
331 unsigned int cfg, cnt, ticks_per_int, lost_ints;
332
333 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
334 cfg = hpet_readl(HPET_T1_CFG);
335 cfg &= ~HPET_TN_ENABLE;
336 hpet_writel(cfg, HPET_T1_CFG);
337 return;
338 }
339
340 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
341 hpet_rtc_int_freq = PIE_freq;
342 else
343 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
344
345 /* It is more accurate to use the comparator value than current count.*/
346 ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq;
347 hpet_t1_cmp += ticks_per_int;
348 hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
349
350 /*
351 * If the interrupt handler was delayed too long, the write above tries
352 * to schedule the next interrupt in the past and the hardware would
353 * not interrupt until the counter had wrapped around.
354 * So we have to check that the comparator wasn't set to a past time.
355 */
356 cnt = hpet_readl(HPET_COUNTER);
357 if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) {
358 lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1;
359 /* Make sure that, even with the time needed to execute
360 * this code, the next scheduled interrupt has been moved
361 * back to the future: */
362 lost_ints++;
363
364 hpet_t1_cmp += lost_ints * ticks_per_int;
365 hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
366
367 if (PIE_on)
368 PIE_count += lost_ints;
369
370 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
371 hpet_rtc_int_freq);
372 }
373}
374
375/*
376 * The functions below are called from rtc driver.
377 * Return 0 if HPET is not being used.
378 * Otherwise do the necessary changes and return 1.
379 */
380int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
381{
382 if (!is_hpet_enabled())
383 return 0;
384
385 if (bit_mask & RTC_UIE)
386 UIE_on = 0;
387 if (bit_mask & RTC_PIE)
388 PIE_on = 0;
389 if (bit_mask & RTC_AIE)
390 AIE_on = 0;
391
392 return 1;
393}
394
395int hpet_set_rtc_irq_bit(unsigned long bit_mask)
396{
397 int timer_init_reqd = 0;
398
399 if (!is_hpet_enabled())
400 return 0;
401
402 if (!(PIE_on | AIE_on | UIE_on))
403 timer_init_reqd = 1;
404
405 if (bit_mask & RTC_UIE) {
406 UIE_on = 1;
407 }
408 if (bit_mask & RTC_PIE) {
409 PIE_on = 1;
410 PIE_count = 0;
411 }
412 if (bit_mask & RTC_AIE) {
413 AIE_on = 1;
414 }
415
416 if (timer_init_reqd)
417 hpet_rtc_timer_init();
418
419 return 1;
420}
421
422int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
423{
424 if (!is_hpet_enabled())
425 return 0;
426
427 alarm_time.tm_hour = hrs;
428 alarm_time.tm_min = min;
429 alarm_time.tm_sec = sec;
430
431 return 1;
432}
433
434int hpet_set_periodic_freq(unsigned long freq)
435{
436 if (!is_hpet_enabled())
437 return 0;
438
439 PIE_freq = freq;
440 PIE_count = 0;
441
442 return 1;
443}
444
445int hpet_rtc_dropped_irq(void)
446{
447 if (!is_hpet_enabled())
448 return 0;
449
450 return 1;
451}
452
453irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
454{
455 struct rtc_time curr_time;
456 unsigned long rtc_int_flag = 0;
457 int call_rtc_interrupt = 0;
458
459 hpet_rtc_timer_reinit();
460
461 if (UIE_on | AIE_on) {
462 rtc_get_rtc_time(&curr_time);
463 }
464 if (UIE_on) {
465 if (curr_time.tm_sec != prev_update_sec) {
466 /* Set update int info, call real rtc int routine */
467 call_rtc_interrupt = 1;
468 rtc_int_flag = RTC_UF;
469 prev_update_sec = curr_time.tm_sec;
470 }
471 }
472 if (PIE_on) {
473 PIE_count++;
474 if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
475 /* Set periodic int info, call real rtc int routine */
476 call_rtc_interrupt = 1;
477 rtc_int_flag |= RTC_PF;
478 PIE_count = 0;
479 }
480 }
481 if (AIE_on) {
482 if ((curr_time.tm_sec == alarm_time.tm_sec) &&
483 (curr_time.tm_min == alarm_time.tm_min) &&
484 (curr_time.tm_hour == alarm_time.tm_hour)) {
485 /* Set alarm int info, call real rtc int routine */
486 call_rtc_interrupt = 1;
487 rtc_int_flag |= RTC_AF;
488 }
489 }
490 if (call_rtc_interrupt) {
491 rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
492 rtc_interrupt(rtc_int_flag, dev_id);
493 }
494 return IRQ_HANDLED;
495}
496#endif
497
diff --git a/arch/i386/mach-default/setup.c b/arch/i386/mach-default/setup.c
index cc2f519b2f7f..c78816210706 100644
--- a/arch/i386/mach-default/setup.c
+++ b/arch/i386/mach-default/setup.c
@@ -79,7 +79,12 @@ void __init trap_init_hook(void)
79{ 79{
80} 80}
81 81
82static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL}; 82static struct irqaction irq0 = {
83 .handler = timer_interrupt,
84 .flags = IRQF_DISABLED | IRQF_NOBALANCING,
85 .mask = CPU_MASK_NONE,
86 .name = "timer"
87};
83 88
84/** 89/**
85 * time_init_hook - do any specific initialisations for the system timer. 90 * time_init_hook - do any specific initialisations for the system timer.
@@ -90,6 +95,7 @@ static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE,
90 **/ 95 **/
91void __init time_init_hook(void) 96void __init time_init_hook(void)
92{ 97{
98 irq0.mask = cpumask_of_cpu(0);
93 setup_irq(0, &irq0); 99 setup_irq(0, &irq0);
94} 100}
95 101
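
The only functional change to irq0 above is the added IRQF_NOBALANCING flag plus the cpumask assigned in time_init_hook(); the rest is a switch from a positional initializer to designated initializers. A minimal stand-alone illustration of the two styles, using a made-up struct rather than the kernel's struct irqaction:

#include <stdio.h>

struct action {
	void (*handler)(void);
	unsigned long flags;
	const char *name;
};

static void dummy_handler(void) { }

/* old style: values listed in declaration order, easy to get wrong */
static struct action old_style = { dummy_handler, 0x1, "timer" };

/* new style: fields named explicitly, omitted fields become zero */
static struct action new_style = {
	.handler = dummy_handler,
	.flags   = 0x1,
	.name    = "timer",
};

int main(void)
{
	printf("%s / %s\n", old_style.name, new_style.name);
	return 0;
}

Designated initializers keep the declaration readable when the structure later grows or reorders its fields.
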
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 4ea6d8b20d17..8206fc1ecc58 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -39,6 +39,7 @@
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/sched.h> /* need_resched() */ 40#include <linux/sched.h> /* need_resched() */
41#include <linux/latency.h> 41#include <linux/latency.h>
42#include <linux/clockchips.h>
42 43
43/* 44/*
44 * Include the apic definitions for x86 to have the APIC timer related defines 45 * Include the apic definitions for x86 to have the APIC timer related defines
@@ -274,12 +275,40 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
274 275
275static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) 276static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
276{ 277{
278#ifdef CONFIG_GENERIC_CLOCKEVENTS
279 unsigned long reason;
280
281 reason = pr->power.timer_broadcast_on_state < INT_MAX ?
282 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
283
284 clockevents_notify(reason, &pr->id);
285#else
277 cpumask_t mask = cpumask_of_cpu(pr->id); 286 cpumask_t mask = cpumask_of_cpu(pr->id);
278 287
279 if (pr->power.timer_broadcast_on_state < INT_MAX) 288 if (pr->power.timer_broadcast_on_state < INT_MAX)
280 on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1); 289 on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
281 else 290 else
282 on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); 291 on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
292#endif
293}
294
295/* Power(C) State timer broadcast control */
296static void acpi_state_timer_broadcast(struct acpi_processor *pr,
297 struct acpi_processor_cx *cx,
298 int broadcast)
299{
300#ifdef CONFIG_GENERIC_CLOCKEVENTS
301
302 int state = cx - pr->power.states;
303
304 if (state >= pr->power.timer_broadcast_on_state) {
305 unsigned long reason;
306
307 reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
308 CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
309 clockevents_notify(reason, &pr->id);
310 }
311#endif
283} 312}
284 313
285#else 314#else
@@ -287,6 +316,11 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
287static void acpi_timer_check_state(int state, struct acpi_processor *pr, 316static void acpi_timer_check_state(int state, struct acpi_processor *pr,
288 struct acpi_processor_cx *cstate) { } 317 struct acpi_processor_cx *cstate) { }
289static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } 318static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
319static void acpi_state_timer_broadcast(struct acpi_processor *pr,
320 struct acpi_processor_cx *cx,
321 int broadcast)
322{
323}
290 324
291#endif 325#endif
292 326
@@ -434,6 +468,7 @@ static void acpi_processor_idle(void)
434 /* Get start time (ticks) */ 468 /* Get start time (ticks) */
435 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 469 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
436 /* Invoke C2 */ 470 /* Invoke C2 */
471 acpi_state_timer_broadcast(pr, cx, 1);
437 acpi_cstate_enter(cx); 472 acpi_cstate_enter(cx);
438 /* Get end time (ticks) */ 473 /* Get end time (ticks) */
439 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 474 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -448,6 +483,7 @@ static void acpi_processor_idle(void)
448 /* Compute time (ticks) that we were actually asleep */ 483 /* Compute time (ticks) that we were actually asleep */
449 sleep_ticks = 484 sleep_ticks =
450 ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; 485 ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
486 acpi_state_timer_broadcast(pr, cx, 0);
451 break; 487 break;
452 488
453 case ACPI_STATE_C3: 489 case ACPI_STATE_C3:
@@ -469,6 +505,7 @@ static void acpi_processor_idle(void)
469 /* Get start time (ticks) */ 505 /* Get start time (ticks) */
470 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 506 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
471 /* Invoke C3 */ 507 /* Invoke C3 */
508 acpi_state_timer_broadcast(pr, cx, 1);
472 acpi_cstate_enter(cx); 509 acpi_cstate_enter(cx);
473 /* Get end time (ticks) */ 510 /* Get end time (ticks) */
474 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 511 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -488,6 +525,7 @@ static void acpi_processor_idle(void)
488 /* Compute time (ticks) that we were actually asleep */ 525 /* Compute time (ticks) that we were actually asleep */
489 sleep_ticks = 526 sleep_ticks =
490 ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; 527 ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
528 acpi_state_timer_broadcast(pr, cx, 0);
491 break; 529 break;
492 530
493 default: 531 default:
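The CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT calls wrapped around acpi_cstate_enter() are the generic replacement for the old switch_APIC_timer_to_ipi() IPIs: before entering a C-state deep enough to stop the local APIC timer, the CPU hands its next timer event to the broadcast device, and takes it back on wakeup. Reduced to its essentials the pattern looks like the sketch below (illustrative only; the comment placeholder stands in for the ACPI C-state entry).

#include <linux/clockchips.h>
#include <linux/smp.h>

static void example_deep_idle(void)
{
	int cpu = smp_processor_id();

	/* Local APIC timer may stop in C3: let the broadcast device cover us. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	/* ... enter the C-state here (acpi_cstate_enter() in the hunk above) ... */

	/* Back from idle: reclaim timer duty from the broadcast device. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}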
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index ccf64fa7b935..a595fe054272 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -113,14 +113,9 @@ extern void smp_local_timer_interrupt (void);
113extern void setup_boot_APIC_clock (void); 113extern void setup_boot_APIC_clock (void);
114extern void setup_secondary_APIC_clock (void); 114extern void setup_secondary_APIC_clock (void);
115extern int APIC_init_uniprocessor (void); 115extern int APIC_init_uniprocessor (void);
116extern void disable_APIC_timer(void);
117extern void enable_APIC_timer(void);
118 116
119extern void enable_NMI_through_LVT0 (void * dummy); 117extern void enable_NMI_through_LVT0 (void * dummy);
120 118
121void smp_send_timer_broadcast_ipi(void);
122void switch_APIC_timer_to_ipi(void *cpumask);
123void switch_ipi_to_APIC_timer(void *cpumask);
124#define ARCH_APICTIMER_STOPS_ON_C3 1 119#define ARCH_APICTIMER_STOPS_ON_C3 1
125 120
126extern int timer_over_8254; 121extern int timer_over_8254;
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index e47be9a56cc2..fc03cf9de5c4 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -90,16 +90,19 @@
90#define HPET_MIN_PERIOD (100000UL) 90#define HPET_MIN_PERIOD (100000UL)
91#define HPET_TICK_RATE (HZ * 100000UL) 91#define HPET_TICK_RATE (HZ * 100000UL)
92 92
93extern unsigned long hpet_tick; /* hpet clks count per tick */
94extern unsigned long hpet_address; /* hpet memory map physical address */ 93extern unsigned long hpet_address; /* hpet memory map physical address */
95extern int hpet_use_timer; 94extern int is_hpet_enabled(void);
96 95
96#ifdef CONFIG_X86_64
97extern unsigned long hpet_tick; /* hpet clks count per tick */
98extern int hpet_use_timer;
97extern int hpet_rtc_timer_init(void); 99extern int hpet_rtc_timer_init(void);
98extern int hpet_enable(void); 100extern int hpet_enable(void);
99extern int hpet_reenable(void);
100extern int is_hpet_enabled(void);
101extern int is_hpet_capable(void); 101extern int is_hpet_capable(void);
102extern int hpet_readl(unsigned long a); 102extern int hpet_readl(unsigned long a);
103#else
104extern int hpet_enable(void);
105#endif
103 106
104#ifdef CONFIG_HPET_EMULATE_RTC 107#ifdef CONFIG_HPET_EMULATE_RTC
105extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); 108extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
@@ -110,5 +113,10 @@ extern int hpet_rtc_dropped_irq(void);
110extern int hpet_rtc_timer_init(void); 113extern int hpet_rtc_timer_init(void);
111extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); 114extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
112#endif /* CONFIG_HPET_EMULATE_RTC */ 115#endif /* CONFIG_HPET_EMULATE_RTC */
116
117#else
118
119static inline int hpet_enable(void) { return 0; }
120
113#endif /* CONFIG_HPET_TIMER */ 121#endif /* CONFIG_HPET_TIMER */
114#endif /* _I386_HPET_H */ 122#endif /* _I386_HPET_H */
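The static inline hpet_enable() stub for !CONFIG_HPET_TIMER exists so generic time setup code can probe the HPET unconditionally and fall back to the PIT without an #ifdef. The idiom it enables looks roughly like the sketch below; setup_pit_timer() is assumed to be the PIT clockevent setup provided by i8253.c, and hpet_enable() is assumed to return nonzero once the HPET is usable.

#include <linux/init.h>
#include <asm/hpet.h>

extern void setup_pit_timer(void);	/* assumed: PIT clockevent setup from i8253.c */

void __init example_time_init(void)
{
	/* With CONFIG_HPET_TIMER=n this compiles down to a plain PIT setup. */
	if (!hpet_enable())
		setup_pit_timer();
}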
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 015d8df07690..6cb0dd4dcdde 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -1,6 +1,21 @@
1#ifndef __ASM_I8253_H__ 1#ifndef __ASM_I8253_H__
2#define __ASM_I8253_H__ 2#define __ASM_I8253_H__
3 3
4#include <linux/clockchips.h>
5
4extern spinlock_t i8253_lock; 6extern spinlock_t i8253_lock;
5 7
8extern struct clock_event_device *global_clock_event;
9
10/**
11 * pit_interrupt_hook - hook into timer tick
12 * @regs: standard registers from interrupt
13 *
14 * Call the global clock event handler.
15 **/
16static inline void pit_interrupt_hook(void)
17{
18 global_clock_event->event_handler(global_clock_event);
19}
20
6#endif /* __ASM_I8253_H__ */ 21#endif /* __ASM_I8253_H__ */
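pit_interrupt_hook() only dereferences global_clock_event; the pointer has to be filled in by whichever clock event device owns IRQ 0. That provider side lives in i8253.c (changed elsewhere in this patch, not shown in this hunk); the sketch below shows the expected shape, with everything except global_clock_event and clockevents_register_device() being illustrative and the programming callbacks deliberately omitted.

#include <linux/init.h>
#include <linux/clockchips.h>
#include <asm/i8253.h>

static struct clock_event_device example_pit_clockevent = {
	.name		= "pit",
	.features	= CLOCK_EVT_FEAT_PERIODIC,
	/* .set_mode, .set_next_event, .mult and .shift omitted in this sketch */
};

void __init example_setup_pit_timer(void)
{
	clockevents_register_device(&example_pit_clockevent);
	/* IRQ 0 handlers reach the registered device through this pointer. */
	global_clock_event = &example_pit_clockevent;
}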
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 7d606e3364ae..56e5689863ae 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -1,86 +1,16 @@
1/* defines for inline arch setup functions */ 1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
2 3
3#include <asm/apic.h>
4#include <asm/i8259.h> 4#include <asm/i8259.h>
5#include <asm/i8253.h>
5 6
6/** 7/**
7 * do_timer_interrupt_hook - hook into timer tick 8 * do_timer_interrupt_hook - hook into timer tick
8 * @regs: standard registers from interrupt
9 * 9 *
10 * Description: 10 * Call the pit clock event handler. see asm/i8253.h
11 * This hook is called immediately after the timer interrupt is ack'd.
12 * It's primary purpose is to allow architectures that don't possess
13 * individual per CPU clocks (like the CPU APICs supply) to broadcast the
14 * timer interrupt as a means of triggering reschedules etc.
15 **/ 11 **/
16 12
17static inline void do_timer_interrupt_hook(void) 13static inline void do_timer_interrupt_hook(void)
18{ 14{
19 do_timer(1); 15 pit_interrupt_hook();
20#ifndef CONFIG_SMP
21 update_process_times(user_mode_vm(get_irq_regs()));
22#endif
23/*
24 * In the SMP case we use the local APIC timer interrupt to do the
25 * profiling, except when we simulate SMP mode on a uniprocessor
26 * system, in that case we have to call the local interrupt handler.
27 */
28#ifndef CONFIG_X86_LOCAL_APIC
29 profile_tick(CPU_PROFILING);
30#else
31 if (!using_apic_timer)
32 smp_local_timer_interrupt();
33#endif
34}
35
36
37/* you can safely undefine this if you don't have the Neptune chipset */
38
39#define BUGGY_NEPTUN_TIMER
40
41/**
42 * do_timer_overflow - process a detected timer overflow condition
43 * @count: hardware timer interrupt count on overflow
44 *
45 * Description:
46 * This call is invoked when the jiffies count has not incremented but
47 * the hardware timer interrupt has. It means that a timer tick interrupt
48 * came along while the previous one was pending, thus a tick was missed
49 **/
50static inline int do_timer_overflow(int count)
51{
52 int i;
53
54 spin_lock(&i8259A_lock);
55 /*
56 * This is tricky when I/O APICs are used;
57 * see do_timer_interrupt().
58 */
59 i = inb(0x20);
60 spin_unlock(&i8259A_lock);
61
62 /* assumption about timer being IRQ0 */
63 if (i & 0x01) {
64 /*
65 * We cannot detect lost timer interrupts ...
66 * well, that's why we call them lost, don't we? :)
67 * [hmm, on the Pentium and Alpha we can ... sort of]
68 */
69 count -= LATCH;
70 } else {
71#ifdef BUGGY_NEPTUN_TIMER
72 /*
73 * for the Neptun bug we know that the 'latch'
74 * command doesn't latch the high and low value
75 * of the counter atomically. Thus we have to
76 * substract 256 from the counter
77 * ... funny, isnt it? :)
78 */
79
80 count -= 256;
81#else
82 printk("do_slow_gettimeoffset(): hardware timer problem?\n");
83#endif
84 }
85 return count;
86} 16}
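The per-tick work removed from this hook (do_timer(), update_process_times(), profile_tick()) is not lost: it is now performed by the handler the generic tick layer installs as global_clock_event->event_handler. A simplified sketch of such a periodic handler is shown below; this is not the literal kernel/time code, and example_do_timer_cpu stands for whichever CPU is responsible for the global jiffies update.

#include <linux/clockchips.h>
#include <linux/sched.h>	/* do_timer(), update_process_times() */
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>		/* user_mode() */

static int example_do_timer_cpu;	/* owner of the global jiffies update */

static void example_tick_periodic(struct clock_event_device *dev)
{
	if (smp_processor_id() == example_do_timer_cpu)
		do_timer(1);		/* advance jiffies once per tick */

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}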
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 04e69c104a74..60f9dcc15d54 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -1,25 +1,18 @@
1/* defines for inline arch setup functions */ 1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
2#include <asm/voyager.h> 4#include <asm/voyager.h>
5#include <asm/i8253.h>
3 6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 * @regs: standard registers from interrupt
10 *
11 * Call the pit clock event handler. see asm/i8253.h
12 **/
4static inline void do_timer_interrupt_hook(void) 13static inline void do_timer_interrupt_hook(void)
5{ 14{
6 do_timer(1); 15 pit_interrupt_hook();
7#ifndef CONFIG_SMP
8 update_process_times(user_mode_vm(irq_regs));
9#endif
10
11 voyager_timer_interrupt(); 16 voyager_timer_interrupt();
12} 17}
13 18
14static inline int do_timer_overflow(int count)
15{
16 /* can't read the ISR, just assume 1 tick
17 overflow */
18 if(count > LATCH || count < 0) {
19 printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH);
20 count = LATCH;
21 }
22 count -= LATCH;
23
24 return count;
25}
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 770bf6da8c3d..f21349399d14 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -23,7 +23,6 @@ extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
23extern int mpc_default_type; 23extern int mpc_default_type;
24extern unsigned long mp_lapic_addr; 24extern unsigned long mp_lapic_addr;
25extern int pic_mode; 25extern int pic_mode;
26extern int using_apic_timer;
27 26
28#ifdef CONFIG_ACPI 27#ifdef CONFIG_ACPI
29extern void mp_register_lapic (u8 id, u8 enabled); 28extern void mp_register_lapic (u8 id, u8 enabled);