Diffstat (limited to 'drivers/xen/events.c')

 drivers/xen/events.c | 101 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 69 insertions(+), 32 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7d24b0d94ed4..347f17edad77 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -261,7 +261,7 @@ static void init_evtchn_cpu_bindings(void)
         }
 #endif
 
-        memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+        memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
 }
 
 static inline void clear_evtchn(int port)
@@ -377,7 +377,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                 irq = find_unbound_irq();
 
                 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                              handle_edge_irq, "event");
+                                              handle_fasteoi_irq, "event");
 
                 evtchn_to_irq[evtchn] = irq;
                 irq_info[irq] = mk_evtchn_info(evtchn);
@@ -435,6 +435,11 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
         irq = per_cpu(virq_to_irq, cpu)[virq];
 
         if (irq == -1) {
+                irq = find_unbound_irq();
+
+                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                              handle_percpu_irq, "virq");
+
                 bind_virq.virq = virq;
                 bind_virq.vcpu = cpu;
                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
@@ -442,11 +447,6 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                         BUG();
                 evtchn = bind_virq.port;
 
-                irq = find_unbound_irq();
-
-                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
-                                              handle_percpu_irq, "virq");
-
                 evtchn_to_irq[evtchn] = irq;
                 irq_info[irq] = mk_virq_info(evtchn, virq);
 
@@ -578,41 +578,75 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
         struct shared_info *sh = HYPERVISOR_shared_info;
         int cpu = smp_processor_id();
+        unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
         int i;
         unsigned long flags;
         static DEFINE_SPINLOCK(debug_lock);
+        struct vcpu_info *v;
 
         spin_lock_irqsave(&debug_lock, flags);
 
-        printk("vcpu %d\n ", cpu);
+        printk("\nvcpu %d\n ", cpu);
 
         for_each_online_cpu(i) {
-                struct vcpu_info *v = per_cpu(xen_vcpu, i);
-                printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
-                       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
-                       v->evtchn_upcall_pending,
-                       v->evtchn_pending_sel);
+                int pending;
+                v = per_cpu(xen_vcpu, i);
+                pending = (get_irq_regs() && i == cpu)
+                        ? xen_irqs_disabled(get_irq_regs())
+                        : v->evtchn_upcall_mask;
+                printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
+                       pending, v->evtchn_upcall_pending,
+                       (int)(sizeof(v->evtchn_pending_sel)*2),
+                       v->evtchn_pending_sel);
+        }
+        v = per_cpu(xen_vcpu, cpu);
+
+        printk("\npending:\n ");
+        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+                printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+                       sh->evtchn_pending[i],
+                       i % 8 == 0 ? "\n " : " ");
+        printk("\nglobal mask:\n ");
+        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+                printk("%0*lx%s",
+                       (int)(sizeof(sh->evtchn_mask[0])*2),
+                       sh->evtchn_mask[i],
+                       i % 8 == 0 ? "\n " : " ");
+
+        printk("\nglobally unmasked:\n ");
+        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+                       i % 8 == 0 ? "\n " : " ");
+
+        printk("\nlocal cpu%d mask:\n ", cpu);
+        for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
+                printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+                       cpu_evtchn[i],
+                       i % 8 == 0 ? "\n " : " ");
+
+        printk("\nlocally unmasked:\n ");
+        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+                unsigned long pending = sh->evtchn_pending[i]
+                        & ~sh->evtchn_mask[i]
+                        & cpu_evtchn[i];
+                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+                       pending, i % 8 == 0 ? "\n " : " ");
         }
-        printk("pending:\n ");
-        for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-                printk("%08lx%s", sh->evtchn_pending[i],
-                       i % 8 == 0 ? "\n " : " ");
-        printk("\nmasks:\n ");
-        for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-                printk("%08lx%s", sh->evtchn_mask[i],
-                       i % 8 == 0 ? "\n " : " ");
-
-        printk("\nunmasked:\n ");
-        for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
-                       i % 8 == 0 ? "\n " : " ");
 
         printk("\npending list:\n");
-        for(i = 0; i < NR_EVENT_CHANNELS; i++) {
+        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                 if (sync_test_bit(i, sh->evtchn_pending)) {
-                        printk(" %d: event %d -> irq %d\n",
+                        int word_idx = i / BITS_PER_LONG;
+                        printk(" %d: event %d -> irq %d%s%s%s\n",
                                cpu_from_evtchn(i), i,
-                               evtchn_to_irq[i]);
+                               evtchn_to_irq[i],
+                               sync_test_bit(word_idx, &v->evtchn_pending_sel)
+                               ? "" : " l2-clear",
+                               !sync_test_bit(i, sh->evtchn_mask)
+                               ? "" : " globally-masked",
+                               sync_test_bit(i, cpu_evtchn)
+                               ? "" : " locally-masked");
                 }
         }
 
@@ -663,6 +697,9 @@ static void __xen_evtchn_do_upcall(void)
                                 int irq = evtchn_to_irq[port];
                                 struct irq_desc *desc;
 
+                                mask_evtchn(port);
+                                clear_evtchn(port);
+
                                 if (irq != -1) {
                                         desc = irq_to_desc(irq);
                                         if (desc)
@@ -800,10 +837,10 @@ static void ack_dynirq(unsigned int irq)
 {
         int evtchn = evtchn_from_irq(irq);
 
-        move_native_irq(irq);
+        move_masked_irq(irq);
 
         if (VALID_EVTCHN(evtchn))
-                clear_evtchn(evtchn);
+                unmask_evtchn(evtchn);
 }
 
 static int retrigger_dynirq(unsigned int irq)
@@ -959,7 +996,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
         .mask           = disable_dynirq,
         .unmask         = enable_dynirq,
 
-        .ack            = ack_dynirq,
+        .eoi            = ack_dynirq,
         .set_affinity   = set_affinity_irq,
         .retrigger      = retrigger_dynirq,
 };
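
Note on the handler change above: taken together, the hunks move dynamic event channels from an edge-style flow to a fasteoi-style one. The upcall now masks the port and clears its pending bit before calling into the handler, and ack_dynirq, wired up as the chip's .eoi hook, unmasks the channel only once handling is done (move_masked_irq lets any pending affinity change happen while the source is still masked). The standalone C program below is only a sketch of that ordering; every name in it (fake_pending, fake_masked, upcall, end_of_interrupt) is invented for illustration and is not code from drivers/xen/events.c.

/* Hypothetical userspace model of the fasteoi-style flow implied by the
 * diff: mask + clear before the handler runs, unmask at end-of-interrupt.
 * None of these identifiers exist in the driver. */
#include <stdbool.h>
#include <stdio.h>

static bool fake_pending[4];    /* stand-in for sh->evtchn_pending bits */
static bool fake_masked[4];     /* stand-in for sh->evtchn_mask bits */

static void handler(int port)
{
        printf("handling event %d (masked=%d pending=%d)\n",
               port, fake_masked[port], fake_pending[port]);
}

static void upcall(int port)
{
        /* mirrors the added mask_evtchn()/clear_evtchn() calls */
        fake_masked[port] = true;
        fake_pending[port] = false;
        handler(port);
}

static void end_of_interrupt(int port)
{
        /* mirrors ack_dynirq() as the .eoi hook: unmask when done */
        fake_masked[port] = false;
}

int main(void)
{
        fake_pending[2] = true;   /* event 2 fires */
        upcall(2);                /* handler sees it masked and already cleared */
        end_of_interrupt(2);      /* delivery re-enabled only here */
        printf("after eoi: masked=%d pending=%d\n",
               fake_masked[2], fake_pending[2]);
        return 0;
}

Running the sketch shows the event already cleared and masked while the handler runs, and unmasked again only after the eoi step, which is the ordering the diff relies on to avoid redelivering the same event mid-handling.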