author     venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>   2008-09-05 21:02:18 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-10-16 10:53:08 -0400
commit     26afe5f2fbf06ea0765aaa316640c4dd472310c0 (patch)
tree       aa9592aa1df9c30058904f3964af72be592c0f63 /arch
parent     4588c1f0354ac96a358b3f9e8e4331c51cf3336f (diff)
x86: HPET_MSI Initialise per-cpu HPET timers
Initialize a per CPU HPET MSI timer when possible. We retain HPET timer 0 (IRQ 0) and timer 1 (IRQ 8) as is when legacy mode is being used, and set up the remaining HPET timers as per CPU MSI based timers.

These per CPU timers eliminate the need for timer broadcasting over IRQ 0 when the LAPIC timer is non-functional in deep C-states. If there are more CPUs than available timers, the CPUs that do not find a timer to use will continue using the LAPIC timer and IRQ 0 broadcast.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/hpet.c   295
1 file changed, 293 insertions(+), 2 deletions(-)
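The assignment policy described in the commit message is easy to see in miniature. The stand-alone C sketch below is not part of the patch; it mirrors the selection loop in hpet_msi_capability_lookup() with purely illustrative numbers, assuming every candidate comparator advertises FSB (MSI) delivery: skip the legacy comparators when legacy replacement mode is in use, keep RESERVE_TIMERS comparators for /dev/hpet, and hand out at most one comparator per possible CPU.

/*
 * Hypothetical user-space sketch (not part of the patch): count how many
 * CPUs end up with a dedicated per-CPU HPET MSI timer under the policy
 * above.  Assumes every candidate comparator is FSB/MSI capable.
 */
#include <stdio.h>

static unsigned int per_cpu_timers(unsigned int num_timers, int legacy,
                                   unsigned int reserve_timers,
                                   unsigned int num_cpus)
{
        unsigned int start = legacy ? 2 : 0;    /* timers 0/1 stay on IRQ 0/IRQ 8 */
        unsigned int used = 0, i;

        for (i = start; i < num_timers - reserve_timers; i++) {
                used++;                         /* one comparator per CPU */
                if (used == num_cpus)
                        break;
        }
        return used;
}

int main(void)
{
        /* e.g. 8 comparators, legacy mode on, 1 reserved for /dev/hpet, 4 CPUs */
        unsigned int used = per_cpu_timers(8, 1, 1, 4);

        printf("%u CPUs get a per-CPU HPET MSI timer\n", used);
        printf("any remaining CPUs keep using LAPIC + IRQ 0 broadcast\n");
        return 0;
}

Any CPU that does not get a comparator keeps using the LAPIC timer with IRQ 0 broadcast, exactly as the commit message states.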
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 03d3655734b4..31e9191b7e19 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -21,10 +21,19 @@
    NSEC = 10^-9 */
 #define FSEC_PER_NSEC   1000000L
 
+#define HPET_DEV_USED_BIT       2
+#define HPET_DEV_USED           (1 << HPET_DEV_USED_BIT)
+#define HPET_DEV_VALID          0x8
+#define HPET_DEV_FSB_CAP        0x1000
+#define HPET_DEV_PERI_CAP       0x2000
+
+#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
+
 /*
  * HPET address is set in acpi/boot.c, when an ACPI entry exists
  */
 unsigned long hpet_address;
+unsigned long hpet_num_timers;
 static void __iomem *hpet_virt_address;
 
 struct hpet_dev {
@@ -36,6 +45,10 @@ struct hpet_dev {
        char name[10];
 };
 
+static struct hpet_dev *hpet_devs;
+
+static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
+
 unsigned long hpet_readl(unsigned long a)
 {
        return readl(hpet_virt_address + a);
@@ -145,6 +158,16 @@ static void hpet_reserve_platform_timers(unsigned long id)
                                Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
        }
 
+       for (i = 0; i < nrtimers; i++) {
+               struct hpet_dev *hdev = &hpet_devs[i];
+
+               if (!(hdev->flags & HPET_DEV_VALID))
+                       continue;
+
+               hd.hd_irq[hdev->num] = hdev->irq;
+               hpet_reserve_timer(&hd, hdev->num);
+       }
+
        hpet_alloc(&hd);
 
 }
@@ -238,6 +261,8 @@ static void hpet_legacy_clockevent_register(void)
        printk(KERN_DEBUG "hpet clockevent registered\n");
 }
 
+static int hpet_setup_msi_irq(unsigned int irq);
+
 static void hpet_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt, int timer)
 {
@@ -279,7 +304,15 @@ static void hpet_set_mode(enum clock_event_mode mode,
                break;
 
        case CLOCK_EVT_MODE_RESUME:
-               hpet_enable_legacy_int();
+               if (timer == 0) {
+                       hpet_enable_legacy_int();
+               } else {
+                       struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+                       hpet_setup_msi_irq(hdev->irq);
+                       disable_irq(hdev->irq);
+                       irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
+                       enable_irq(hdev->irq);
+               }
                break;
        }
 }
@@ -318,7 +351,7 @@ static int hpet_legacy_next_event(unsigned long delta,
 /*
  * HPET MSI Support
  */
-
+#ifdef CONFIG_PCI_MSI
 void hpet_msi_unmask(unsigned int irq)
 {
        struct hpet_dev *hdev = get_irq_data(irq);
@@ -358,6 +391,253 @@ void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
        msg->address_hi = 0;
 }
 
+static void hpet_msi_set_mode(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+       hpet_set_mode(mode, evt, hdev->num);
+}
+
+static int hpet_msi_next_event(unsigned long delta,
+                               struct clock_event_device *evt)
+{
+       struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+       return hpet_next_event(delta, evt, hdev->num);
+}
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+       if (arch_setup_hpet_msi(irq)) {
+               destroy_irq(irq);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int hpet_assign_irq(struct hpet_dev *dev)
+{
+       unsigned int irq;
+
+       irq = create_irq();
+       if (!irq)
+               return -EINVAL;
+
+       set_irq_data(irq, dev);
+
+       if (hpet_setup_msi_irq(irq))
+               return -EINVAL;
+
+       dev->irq = irq;
+       return 0;
+}
+
+static irqreturn_t hpet_interrupt_handler(int irq, void *data)
+{
+       struct hpet_dev *dev = (struct hpet_dev *)data;
+       struct clock_event_device *hevt = &dev->evt;
+
+       if (!hevt->event_handler) {
+               printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
+                               dev->num);
+               return IRQ_HANDLED;
+       }
+
+       hevt->event_handler(hevt);
+       return IRQ_HANDLED;
+}
+
+static int hpet_setup_irq(struct hpet_dev *dev)
+{
+
+       if (request_irq(dev->irq, hpet_interrupt_handler,
+                       IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+               return -1;
+
+       disable_irq(dev->irq);
+       irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+       enable_irq(dev->irq);
+
+       return 0;
+}
+
+/* This should be called in specific @cpu */
+static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
+{
+       struct clock_event_device *evt = &hdev->evt;
+       uint64_t hpet_freq;
+
+       WARN_ON(cpu != smp_processor_id());
+       if (!(hdev->flags & HPET_DEV_VALID))
+               return;
+
+       if (hpet_setup_msi_irq(hdev->irq))
+               return;
+
+       hdev->cpu = cpu;
+       per_cpu(cpu_hpet_dev, cpu) = hdev;
+       evt->name = hdev->name;
+       hpet_setup_irq(hdev);
+       evt->irq = hdev->irq;
+
+       evt->rating = 110;
+       evt->features = CLOCK_EVT_FEAT_ONESHOT;
+       if (hdev->flags & HPET_DEV_PERI_CAP)
+               evt->features |= CLOCK_EVT_FEAT_PERIODIC;
+
+       evt->set_mode = hpet_msi_set_mode;
+       evt->set_next_event = hpet_msi_next_event;
+       evt->shift = 32;
+
+       /*
+        * The period is a femto seconds value. We need to calculate the
+        * scaled math multiplication factor for nanosecond to hpet tick
+        * conversion.
+        */
+       hpet_freq = 1000000000000000ULL;
+       do_div(hpet_freq, hpet_period);
+       evt->mult = div_sc((unsigned long) hpet_freq,
+                               NSEC_PER_SEC, evt->shift);
+       /* Calculate the max delta */
+       evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
+       /* 5 usec minimum reprogramming delta. */
+       evt->min_delta_ns = 5000;
+
+       evt->cpumask = cpumask_of_cpu(hdev->cpu);
+       clockevents_register_device(evt);
+}
+
+#ifdef CONFIG_HPET
+/* Reserve at least one timer for userspace (/dev/hpet) */
+#define RESERVE_TIMERS 1
+#else
+#define RESERVE_TIMERS 0
+#endif
+void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+       unsigned int id;
+       unsigned int num_timers;
+       unsigned int num_timers_used = 0;
+       int i;
+
+       id = hpet_readl(HPET_ID);
+
+       num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
+       num_timers++; /* Value read out starts from 0 */
+
+       hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
+       if (!hpet_devs)
+               return;
+
+       hpet_num_timers = num_timers;
+
+       for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
+               struct hpet_dev *hdev = &hpet_devs[num_timers_used];
+               unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
+
+               /* Only consider HPET timer with MSI support */
+               if (!(cfg & HPET_TN_FSB_CAP))
+                       continue;
+
+               hdev->flags = 0;
+               if (cfg & HPET_TN_PERIODIC_CAP)
+                       hdev->flags |= HPET_DEV_PERI_CAP;
+               hdev->num = i;
+
+               sprintf(hdev->name, "hpet%d", i);
+               if (hpet_assign_irq(hdev))
+                       continue;
+
+               hdev->flags |= HPET_DEV_FSB_CAP;
+               hdev->flags |= HPET_DEV_VALID;
+               num_timers_used++;
+               if (num_timers_used == num_possible_cpus())
+                       break;
+       }
+
+       printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
+               num_timers, num_timers_used);
+}
+
+static struct hpet_dev *hpet_get_unused_timer(void)
+{
+       int i;
+
+       if (!hpet_devs)
+               return NULL;
+
+       for (i = 0; i < hpet_num_timers; i++) {
+               struct hpet_dev *hdev = &hpet_devs[i];
+
+               if (!(hdev->flags & HPET_DEV_VALID))
+                       continue;
+               if (test_and_set_bit(HPET_DEV_USED_BIT,
+                       (unsigned long *)&hdev->flags))
+                       continue;
+               return hdev;
+       }
+       return NULL;
+}
+
+struct hpet_work_struct {
+       struct delayed_work work;
+       struct completion complete;
+};
+
+static void hpet_work(struct work_struct *w)
+{
+       struct hpet_dev *hdev;
+       int cpu = smp_processor_id();
+       struct hpet_work_struct *hpet_work;
+
+       hpet_work = container_of(w, struct hpet_work_struct, work.work);
+
+       hdev = hpet_get_unused_timer();
+       if (hdev)
+               init_one_hpet_msi_clockevent(hdev, cpu);
+
+       complete(&hpet_work->complete);
+}
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+               unsigned long action, void *hcpu)
+{
+       unsigned long cpu = (unsigned long)hcpu;
+       struct hpet_work_struct work;
+       struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
+
+       switch (action & 0xf) {
+       case CPU_ONLINE:
+               INIT_DELAYED_WORK(&work.work, hpet_work);
+               init_completion(&work.complete);
+               /* FIXME: add schedule_work_on() */
+               schedule_delayed_work_on(cpu, &work.work, 0);
+               wait_for_completion(&work.complete);
+               break;
+       case CPU_DEAD:
+               if (hdev) {
+                       free_irq(hdev->irq, hdev);
+                       hdev->flags &= ~HPET_DEV_USED;
+                       per_cpu(cpu_hpet_dev, cpu) = NULL;
+               }
+               break;
+       }
+       return NOTIFY_OK;
+}
+#else
+
+void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+       return;
+}
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+               unsigned long action, void *hcpu)
+{
+       return NOTIFY_OK;
+}
+
+#endif
+
 /*
  * Clock source related code
  */
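The mult/shift setup in init_one_hpet_msi_clockevent() above turns the HPET period (reported in femtoseconds) into a frequency and then into the fixed-point multiplier the clockevents core uses for nanosecond-to-tick conversion. The following minimal user-space sketch of that arithmetic is not from the patch; the 69841279 fs period (about 14.318 MHz) is only a typical, illustrative value.

/*
 * Illustrative only -- mirrors the mult/shift math in
 * init_one_hpet_msi_clockevent() in user space.  69841279 fs is a common
 * HPET period (~14.318 MHz), not a value taken from this patch.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t hpet_period = 69841279;                        /* femtoseconds per tick */
        uint64_t hpet_freq = 1000000000000000ULL / hpet_period; /* Hz */
        unsigned int shift = 32;
        uint64_t mult = (hpet_freq << shift) / 1000000000ULL;   /* what div_sc() computes */

        /* the clockevents core converts a delta in ns to ticks as (ns * mult) >> shift */
        uint64_t min_delta_ns = 5000;                           /* 5 usec, as in the patch */
        uint64_t ticks = (min_delta_ns * mult) >> shift;

        printf("freq = %llu Hz, mult = %llu\n",
               (unsigned long long)hpet_freq, (unsigned long long)mult);
        printf("5000 ns -> %llu HPET ticks\n", (unsigned long long)ticks);
        return 0;
}

At that illustrative rate, the 5 usec min_delta_ns set by the patch comes out to roughly 71 HPET ticks.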
@@ -493,8 +773,10 @@ int __init hpet_enable(void)
 
        if (id & HPET_ID_LEGSUP) {
                hpet_legacy_clockevent_register();
+               hpet_msi_capability_lookup(2);
                return 1;
        }
+       hpet_msi_capability_lookup(0);
        return 0;
 
 out_nohpet:
@@ -511,6 +793,8 @@ out_nohpet:
  */
 static __init int hpet_late_init(void)
 {
+       int cpu;
+
        if (boot_hpet_disable)
                return -ENODEV;
 
@@ -526,6 +810,13 @@ static __init int hpet_late_init(void)
 
        hpet_reserve_platform_timers(hpet_readl(HPET_ID));
 
+       for_each_online_cpu(cpu) {
+               hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
+       }
+
+       /* This notifier should be called after workqueue is ready */
+       hotcpu_notifier(hpet_cpuhp_notify, -20);
+
        return 0;
 }
 fs_initcall(hpet_late_init);