author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/kernel/hpet.c | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/kernel/hpet.c')
-rw-r--r-- | arch/x86/kernel/hpet.c | 147 |
1 file changed, 50 insertions(+), 97 deletions(-)
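The hpet.c part of this merge pulls in the upstream rework that drops the hand-rolled mult/shift math and instead registers the clock event devices with clockevents_config_and_register(), passing the HPET frequency and a cycle-based minimum programming delta. The sketch below is an illustration only, not part of the commit: it mirrors the femtoseconds-per-cycle to Hz conversion and the HPET_MIN_PROG_DELTA value the patched code relies on. The example period (69841279 fs/cycle, a typical 14.318 MHz HPET) is an assumed value.

/*
 * Standalone sketch -- mirrors the arithmetic of the patched
 * hpet_enable() path, not the kernel code itself.  The HPET period
 * register reports femtoseconds per cycle; the driver converts that
 * to a frequency in Hz before handing it to the clockevents core.
 */
#include <stdio.h>
#include <stdint.h>

#define FSEC_PER_SEC        1000000000000000ULL  /* 10^15 femtoseconds */
#define HPET_MIN_CYCLES     128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

int main(void)
{
	/* Assumed example period: 69841279 fs/cycle (~14.318 MHz HPET). */
	uint64_t hpet_period = 69841279ULL;
	uint64_t hpet_freq = FSEC_PER_SEC / hpet_period;

	printf("HPET frequency: %llu Hz\n", (unsigned long long)hpet_freq);
	printf("minimum programming delta: %d cycles\n", HPET_MIN_PROG_DELTA);
	return 0;
}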
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 7494999141b3..6781765b3a0d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -27,6 +27,9 @@
 #define HPET_DEV_FSB_CAP	0x1000
 #define HPET_DEV_PERI_CAP	0x2000
 
+#define HPET_MIN_CYCLES		128
+#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
@@ -214,7 +217,7 @@ static void hpet_reserve_platform_timers(unsigned int id) { }
 /*
  * Common hpet info
  */
-static unsigned long hpet_period;
+static unsigned long hpet_freq;
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt);
@@ -229,7 +232,6 @@ static struct clock_event_device hpet_clockevent = {
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_mode = hpet_legacy_set_mode,
 	.set_next_event = hpet_legacy_next_event,
-	.shift = 32,
 	.irq = 0,
 	.rating = 50,
 };
@@ -287,27 +289,12 @@ static void hpet_legacy_clockevent_register(void)
 	hpet_enable_legacy_int();
 
 	/*
-	 * The mult factor is defined as (include/linux/clockchips.h)
-	 * mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h)
-	 * hpet_period is in units of femtoseconds (per cycle), so
-	 * mult/2^shift = cyc/ns = 10^6/hpet_period
-	 * mult = (10^6 * 2^shift)/hpet_period
-	 * mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period
-	 */
-	hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC,
-				      hpet_period, hpet_clockevent.shift);
-	/* Calculate the min / max delta */
-	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
-							   &hpet_clockevent);
-	/* 5 usec minimum reprogramming delta. */
-	hpet_clockevent.min_delta_ns = 5000;
-
-	/*
 	 * Start hpet with the boot cpu mask and make it
 	 * global after the IO_APIC has been initialized.
 	 */
 	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
-	clockevents_register_device(&hpet_clockevent);
+	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
+					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
 	global_clock_event = &hpet_clockevent;
 	printk(KERN_DEBUG "hpet clockevent registered\n");
 }
@@ -380,44 +367,37 @@ static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
 {
 	u32 cnt;
+	s32 res;
 
 	cnt = hpet_readl(HPET_COUNTER);
 	cnt += (u32) delta;
 	hpet_writel(cnt, HPET_Tn_CMP(timer));
 
 	/*
-	 * We need to read back the CMP register on certain HPET
-	 * implementations (ATI chipsets) which seem to delay the
-	 * transfer of the compare register into the internal compare
-	 * logic. With small deltas this might actually be too late as
-	 * the counter could already be higher than the compare value
-	 * at that point and we would wait for the next hpet interrupt
-	 * forever. We found out that reading the CMP register back
-	 * forces the transfer so we can rely on the comparison with
-	 * the counter register below. If the read back from the
-	 * compare register does not match the value we programmed
-	 * then we might have a real hardware problem. We can not do
-	 * much about it here, but at least alert the user/admin with
-	 * a prominent warning.
-	 *
-	 * An erratum on some chipsets (ICH9,..), results in
-	 * comparator read immediately following a write returning old
-	 * value. Workaround for this is to read this value second
-	 * time, when first read returns old value.
-	 *
-	 * In fact the write to the comparator register is delayed up
-	 * to two HPET cycles so the workaround we tried to restrict
-	 * the readback to those known to be borked ATI chipsets
-	 * failed miserably. So we give up on optimizations forever
-	 * and penalize all HPET incarnations unconditionally.
+	 * HPETs are a complete disaster. The compare register is
+	 * based on a equal comparison and neither provides a less
+	 * than or equal functionality (which would require to take
+	 * the wraparound into account) nor a simple count down event
+	 * mode. Further the write to the comparator register is
+	 * delayed internally up to two HPET clock cycles in certain
+	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
+	 * longer delays. We worked around that by reading back the
+	 * compare register, but that required another workaround for
+	 * ICH9,10 chips where the first readout after write can
+	 * return the old stale value. We already had a minimum
+	 * programming delta of 5us enforced, but a NMI or SMI hitting
+	 * between the counter readout and the comparator write can
+	 * move us behind that point easily. Now instead of reading
+	 * the compare register back several times, we make the ETIME
+	 * decision based on the following: Return ETIME if the
+	 * counter value after the write is less than HPET_MIN_CYCLES
+	 * away from the event or if the counter is already ahead of
+	 * the event. The minimum programming delta for the generic
+	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
 	 */
-	if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
-		if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
-			printk_once(KERN_WARNING
-				"hpet: compare register read back failed.\n");
-	}
+	res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 
-	return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
+	return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,
@@ -440,9 +420,9 @@ static int hpet_legacy_next_event(unsigned long delta,
 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
 static struct hpet_dev *hpet_devs;
 
-void hpet_msi_unmask(unsigned int irq)
+void hpet_msi_unmask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = get_irq_data(irq);
+	struct hpet_dev *hdev = data->handler_data;
 	unsigned int cfg;
 
 	/* unmask it */
@@ -451,10 +431,10 @@ void hpet_msi_unmask(unsigned int irq)
 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
 }
 
-void hpet_msi_mask(unsigned int irq)
+void hpet_msi_mask(struct irq_data *data)
 {
+	struct hpet_dev *hdev = data->handler_data;
 	unsigned int cfg;
-	struct hpet_dev *hdev = get_irq_data(irq);
 
 	/* mask it */
 	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
@@ -462,18 +442,14 @@ void hpet_msi_mask(unsigned int irq)
 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
 }
 
-void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
 {
-	struct hpet_dev *hdev = get_irq_data(irq);
-
 	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
 	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
 }
 
-void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
 {
-	struct hpet_dev *hdev = get_irq_data(irq);
-
 	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
 	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
 	msg->address_hi = 0;
@@ -510,7 +486,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
 	if (!irq)
 		return -EINVAL;
 
-	set_irq_data(irq, dev);
+	irq_set_handler_data(irq, dev);
 
 	if (hpet_setup_msi_irq(irq))
 		return -EINVAL;
@@ -556,7 +532,6 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
 {
 	struct clock_event_device *evt = &hdev->evt;
-	uint64_t hpet_freq;
 
 	WARN_ON(cpu != smp_processor_id());
 	if (!(hdev->flags & HPET_DEV_VALID))
@@ -578,24 +553,10 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
 
 	evt->set_mode = hpet_msi_set_mode;
 	evt->set_next_event = hpet_msi_next_event;
-	evt->shift = 32;
-
-	/*
-	 * The period is a femto seconds value. We need to calculate the
-	 * scaled math multiplication factor for nanosecond to hpet tick
-	 * conversion.
-	 */
-	hpet_freq = FSEC_PER_SEC;
-	do_div(hpet_freq, hpet_period);
-	evt->mult = div_sc((unsigned long) hpet_freq,
-				NSEC_PER_SEC, evt->shift);
-	/* Calculate the max delta */
-	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
-	/* 5 usec minimum reprogramming delta. */
-	evt->min_delta_ns = 5000;
-
 	evt->cpumask = cpumask_of(hdev->cpu);
-	clockevents_register_device(evt);
+
+	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
+					0x7FFFFFFF);
 }
 
 #ifdef CONFIG_HPET
@@ -726,7 +687,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
@@ -799,7 +760,6 @@ static struct clocksource clocksource_hpet = {
 static int hpet_clocksource_register(void)
 {
 	u64 start, now;
-	u64 hpet_freq;
 	cycle_t t1;
 
 	/* Start the counter */
@@ -826,24 +786,7 @@ static int hpet_clocksource_register(void)
 		return -ENODEV;
 	}
 
-	/*
-	 * The definition of mult is (include/linux/clocksource.h)
-	 * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc
-	 * so we first need to convert hpet_period to ns/cyc units:
-	 *  mult/2^shift = ns/cyc = hpet_period/10^6
-	 *  mult = (hpet_period * 2^shift)/10^6
-	 *  mult = (hpet_period << shift)/FSEC_PER_NSEC
-	 */
-
-	/* Need to convert hpet_period (fsec/cyc) to cyc/sec:
-	 *
-	 * cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc)
-	 * cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period
-	 */
-	hpet_freq = FSEC_PER_SEC;
-	do_div(hpet_freq, hpet_period);
 	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
-
 	return 0;
 }
 
@@ -852,7 +795,9 @@ static int hpet_clocksource_register(void)
  */
int __init hpet_enable(void)
 {
+	unsigned long hpet_period;
 	unsigned int id;
+	u64 freq;
 	int i;
 
 	if (!is_hpet_capable())
@@ -891,6 +836,14 @@ int __init hpet_enable(void)
 		goto out_nohpet;
 
 	/*
+	 * The period is a femto seconds value. Convert it to a
+	 * frequency.
+	 */
+	freq = FSEC_PER_SEC;
+	do_div(freq, hpet_period);
+	hpet_freq = freq;
+
+	/*
 	 * Read the HPET ID register to retrieve the IRQ routing
 	 * information and the number of channels
 	 */