aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2010-10-01 12:20:09 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2010-10-18 10:40:29 -0400
commitd46a78b05c0e37f76ddf4a7a67bf0b6c68bada55 (patch)
tree0cdea39b1ede2fa88c58be01457a83c39a01e6a8
parentd8e0420603cf1ce9cb459c00ea0b7337de41b968 (diff)
xen: implement pirq type event channels
A privileged PV Xen domain can get direct access to hardware. In order for this to be useful, it must be able to get hardware interrupts. Being a PV Xen domain, all interrupts are delivered as event channels. PIRQ event channels are bound to a pirq number and an interrupt vector. When an IO APIC raises a hardware interrupt on that vector, it is delivered as an event channel, which we can deliver to the appropriate device driver(s). This patch simply implements the infrastructure for dealing with pirq event channels. [ Impact: integrate hardware interrupts into Xen's event scheme ] Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r--drivers/xen/events.c243
-rw-r--r--include/xen/events.h11
2 files changed, 252 insertions, 2 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7d24b0d94ed4..bc69a9d92abc 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -16,7 +16,7 @@
16 * (typically dom0). 16 * (typically dom0).
17 * 2. VIRQs, typically used for timers. These are per-cpu events. 17 * 2. VIRQs, typically used for timers. These are per-cpu events.
18 * 3. IPIs. 18 * 3. IPIs.
19 * 4. Hardware interrupts. Not supported at present. 19 * 4. PIRQs - Hardware interrupts.
20 * 20 *
21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
22 */ 22 */
@@ -46,6 +46,9 @@
46#include <xen/interface/hvm/hvm_op.h> 46#include <xen/interface/hvm/hvm_op.h>
47#include <xen/interface/hvm/params.h> 47#include <xen/interface/hvm/params.h>
48 48
49/* Leave low irqs free for identity mapping */
50#define LEGACY_IRQS 16
51
49/* 52/*
50 * This lock protects updates to the following mapping and reference-count 53 * This lock protects updates to the following mapping and reference-count
51 * arrays. The lock does not need to be acquired to read the mapping tables. 54 * arrays. The lock does not need to be acquired to read the mapping tables.
@@ -89,10 +92,12 @@ struct irq_info
89 enum ipi_vector ipi; 92 enum ipi_vector ipi;
90 struct { 93 struct {
91 unsigned short gsi; 94 unsigned short gsi;
92 unsigned short vector; 95 unsigned char vector;
96 unsigned char flags;
93 } pirq; 97 } pirq;
94 } u; 98 } u;
95}; 99};
100#define PIRQ_NEEDS_EOI (1 << 0)
96 101
97static struct irq_info irq_info[NR_IRQS]; 102static struct irq_info irq_info[NR_IRQS];
98 103
@@ -113,6 +118,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
113 118
114static struct irq_chip xen_dynamic_chip; 119static struct irq_chip xen_dynamic_chip;
115static struct irq_chip xen_percpu_chip; 120static struct irq_chip xen_percpu_chip;
121static struct irq_chip xen_pirq_chip;
116 122
117/* Constructor for packed IRQ information. */ 123/* Constructor for packed IRQ information. */
118static struct irq_info mk_unbound_info(void) 124static struct irq_info mk_unbound_info(void)
@@ -225,6 +231,15 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
225 return ret; 231 return ret;
226} 232}
227 233
234static bool pirq_needs_eoi(unsigned irq)
235{
236 struct irq_info *info = info_for_irq(irq);
237
238 BUG_ON(info->type != IRQT_PIRQ);
239
240 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
241}
242
228static inline unsigned long active_evtchns(unsigned int cpu, 243static inline unsigned long active_evtchns(unsigned int cpu,
229 struct shared_info *sh, 244 struct shared_info *sh,
230 unsigned int idx) 245 unsigned int idx)
@@ -365,6 +380,210 @@ static int find_unbound_irq(void)
365 return irq; 380 return irq;
366} 381}
367 382
383static bool identity_mapped_irq(unsigned irq)
384{
385 /* only identity map legacy irqs */
386 return irq < LEGACY_IRQS;
387}
388
389static void pirq_unmask_notify(int irq)
390{
391 struct physdev_eoi eoi = { .irq = irq };
392
393 if (unlikely(pirq_needs_eoi(irq))) {
394 int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
395 WARN_ON(rc);
396 }
397}
398
399static void pirq_query_unmask(int irq)
400{
401 struct physdev_irq_status_query irq_status;
402 struct irq_info *info = info_for_irq(irq);
403
404 BUG_ON(info->type != IRQT_PIRQ);
405
406 irq_status.irq = irq;
407 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
408 irq_status.flags = 0;
409
410 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
411 if (irq_status.flags & XENIRQSTAT_needs_eoi)
412 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
413}
414
415static bool probing_irq(int irq)
416{
417 struct irq_desc *desc = irq_to_desc(irq);
418
419 return desc && desc->action == NULL;
420}
421
422static unsigned int startup_pirq(unsigned int irq)
423{
424 struct evtchn_bind_pirq bind_pirq;
425 struct irq_info *info = info_for_irq(irq);
426 int evtchn = evtchn_from_irq(irq);
427
428 BUG_ON(info->type != IRQT_PIRQ);
429
430 if (VALID_EVTCHN(evtchn))
431 goto out;
432
433 bind_pirq.pirq = irq;
434 /* NB. We are happy to share unless we are probing. */
435 bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
436 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
437 if (!probing_irq(irq))
438 printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
439 irq);
440 return 0;
441 }
442 evtchn = bind_pirq.port;
443
444 pirq_query_unmask(irq);
445
446 evtchn_to_irq[evtchn] = irq;
447 bind_evtchn_to_cpu(evtchn, 0);
448 info->evtchn = evtchn;
449
450out:
451 unmask_evtchn(evtchn);
452 pirq_unmask_notify(irq);
453
454 return 0;
455}
456
457static void shutdown_pirq(unsigned int irq)
458{
459 struct evtchn_close close;
460 struct irq_info *info = info_for_irq(irq);
461 int evtchn = evtchn_from_irq(irq);
462
463 BUG_ON(info->type != IRQT_PIRQ);
464
465 if (!VALID_EVTCHN(evtchn))
466 return;
467
468 mask_evtchn(evtchn);
469
470 close.port = evtchn;
471 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
472 BUG();
473
474 bind_evtchn_to_cpu(evtchn, 0);
475 evtchn_to_irq[evtchn] = -1;
476 info->evtchn = 0;
477}
478
479static void enable_pirq(unsigned int irq)
480{
481 startup_pirq(irq);
482}
483
484static void disable_pirq(unsigned int irq)
485{
486}
487
488static void ack_pirq(unsigned int irq)
489{
490 int evtchn = evtchn_from_irq(irq);
491
492 move_native_irq(irq);
493
494 if (VALID_EVTCHN(evtchn)) {
495 mask_evtchn(evtchn);
496 clear_evtchn(evtchn);
497 }
498}
499
500static void end_pirq(unsigned int irq)
501{
502 int evtchn = evtchn_from_irq(irq);
503 struct irq_desc *desc = irq_to_desc(irq);
504
505 if (WARN_ON(!desc))
506 return;
507
508 if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
509 (IRQ_DISABLED|IRQ_PENDING)) {
510 shutdown_pirq(irq);
511 } else if (VALID_EVTCHN(evtchn)) {
512 unmask_evtchn(evtchn);
513 pirq_unmask_notify(irq);
514 }
515}
516
517static int find_irq_by_gsi(unsigned gsi)
518{
519 int irq;
520
521 for (irq = 0; irq < NR_IRQS; irq++) {
522 struct irq_info *info = info_for_irq(irq);
523
524 if (info == NULL || info->type != IRQT_PIRQ)
525 continue;
526
527 if (gsi_from_irq(irq) == gsi)
528 return irq;
529 }
530
531 return -1;
532}
533
534/*
535 * Allocate a physical irq, along with a vector. We don't assign an
536 * event channel until the irq actually started up. Return an
537 * existing irq if we've already got one for the gsi.
538 */
539int xen_allocate_pirq(unsigned gsi)
540{
541 int irq;
542 struct physdev_irq irq_op;
543
544 spin_lock(&irq_mapping_update_lock);
545
546 irq = find_irq_by_gsi(gsi);
547 if (irq != -1) {
548 printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
549 irq, gsi);
550 goto out; /* XXX need refcount? */
551 }
552
553 if (identity_mapped_irq(gsi)) {
554 irq = gsi;
555 dynamic_irq_init(irq);
556 } else
557 irq = find_unbound_irq();
558
559 set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
560 handle_level_irq, "pirq");
561
562 irq_op.irq = irq;
563 if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
564 dynamic_irq_cleanup(irq);
565 irq = -ENOSPC;
566 goto out;
567 }
568
569 irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);
570
571out:
572 spin_unlock(&irq_mapping_update_lock);
573
574 return irq;
575}
576
577int xen_vector_from_irq(unsigned irq)
578{
579 return vector_from_irq(irq);
580}
581
582int xen_gsi_from_irq(unsigned irq)
583{
584 return gsi_from_irq(irq);
585}
586
368int bind_evtchn_to_irq(unsigned int evtchn) 587int bind_evtchn_to_irq(unsigned int evtchn)
369{ 588{
370 int irq; 589 int irq;
@@ -964,6 +1183,26 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
964 .retrigger = retrigger_dynirq, 1183 .retrigger = retrigger_dynirq,
965}; 1184};
966 1185
1186static struct irq_chip xen_pirq_chip __read_mostly = {
1187 .name = "xen-pirq",
1188
1189 .startup = startup_pirq,
1190 .shutdown = shutdown_pirq,
1191
1192 .enable = enable_pirq,
1193 .unmask = enable_pirq,
1194
1195 .disable = disable_pirq,
1196 .mask = disable_pirq,
1197
1198 .ack = ack_pirq,
1199 .end = end_pirq,
1200
1201 .set_affinity = set_affinity_irq,
1202
1203 .retrigger = retrigger_dynirq,
1204};
1205
967static struct irq_chip xen_percpu_chip __read_mostly = { 1206static struct irq_chip xen_percpu_chip __read_mostly = {
968 .name = "xen-percpu", 1207 .name = "xen-percpu",
969 1208
diff --git a/include/xen/events.h b/include/xen/events.h
index a15d93262e30..8f6232023b75 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -63,4 +63,15 @@ int xen_set_callback_via(uint64_t via);
63void xen_evtchn_do_upcall(struct pt_regs *regs); 63void xen_evtchn_do_upcall(struct pt_regs *regs);
64void xen_hvm_evtchn_do_upcall(void); 64void xen_hvm_evtchn_do_upcall(void);
65 65
66/* Allocate an irq for a physical interrupt, given a gsi. "Legacy"
67 * GSIs are identity mapped; others are dynamically allocated as
68 * usual. */
69int xen_allocate_pirq(unsigned gsi);
70
71/* Return vector allocated to pirq */
72int xen_vector_from_irq(unsigned pirq);
73
74/* Return gsi allocated to pirq */
75int xen_gsi_from_irq(unsigned pirq);
76
66#endif /* _XEN_EVENTS_H */ 77#endif /* _XEN_EVENTS_H */