author     Ian Campbell <ian.campbell@citrix.com>	2011-03-10 11:08:11 -0500
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-03-10 14:48:41 -0500
commit     6cb6537d34a146c8f3e232da9ffd79ae1265fed3 (patch)
tree       1b050ae966f66efe5abcbd3b12029b4420499e5b /drivers/xen/events.c
parent     3d4cfa3736f2f080d3bde4f4329b7b6a7544e093 (diff)
xen: events: maintain a list of Xen interrupts
In a PVHVM kernel not all interrupts are Xen interrupts (APIC interrupts can also be present). Currently we get away with walking over all interrupts because the lookup in the irq_info array simply returns IRQT_UNBOUND and we ignore it. However this array will be going away in a future patch so we need to manually track which interrupts have been allocated by the Xen events infrastructure.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
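For illustration only (not part of this commit): below is a minimal, self-contained user-space sketch of the pattern the patch adopts, an intrusive list that records exactly the interrupts allocated by the Xen events code, so that later walks visit only those entries instead of scanning every IRQ number up to nr_irqs. The list_init/list_add_tail/list_del helpers, the container_of() macro and the simplified irq_info/xen_irq_init() here are stand-ins assumed for the sketch, not the kernel's real definitions.

/*
 * Standalone sketch (user space, not kernel code): an intrusive list tracks
 * only the entries we allocated ourselves, so iteration never has to scan
 * the whole 0..nr_irqs index space the way the old irq_info[] walks did.
 * The list helpers below are simplified stand-ins for <linux/list.h>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Per-IRQ bookkeeping, mirroring the fields the patch adds to irq_info. */
struct irq_info {
	struct list_head list;	/* linkage on the "interrupts we own" list */
	unsigned irq;		/* which IRQ this entry describes */
	int evtchn;		/* bound event channel, 0 = unbound */
};

static struct list_head xen_irq_list_head;

/* Analogue of the patch's xen_irq_init(): register the IRQ on the list. */
static struct irq_info *xen_irq_init(unsigned irq)
{
	struct irq_info *info = calloc(1, sizeof(*info));

	info->irq = irq;
	list_add_tail(&info->list, &xen_irq_list_head);
	return info;
}

int main(void)
{
	struct irq_info *a, *b;
	struct list_head *pos;

	list_init(&xen_irq_list_head);

	/* Only IRQs 3 and 9 are "ours"; every other IRQ is simply never seen. */
	a = xen_irq_init(3);
	a->evtchn = 17;
	b = xen_irq_init(9);
	b->evtchn = 42;

	/* Open-coded equivalent of list_for_each_entry() over the list. */
	for (pos = xen_irq_list_head.next; pos != &xen_irq_list_head; pos = pos->next) {
		struct irq_info *info = container_of(pos, struct irq_info, list);

		printf("irq %u -> evtchn %d\n", info->irq, info->evtchn);
	}

	/* Analogue of xen_free_irq(): unlink entries so later walks skip them. */
	list_del(&a->list);
	free(a);
	list_del(&b->list);
	free(b);

	return 0;
}

The hunks below implement the same idea in the kernel proper: xen_irq_init() adds the irq_info entry to xen_irq_list_head at allocation time, xen_free_irq() removes it with list_del(), and the walkers in init_evtchn_cpu_bindings(), find_irq_by_gsi() and xen_irq_resume() switch from nr_irqs-wide loops to list_for_each_entry().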
Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--	drivers/xen/events.c	59
1 file changed, 44 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index cf372d41077d..e119989ec15b 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -56,6 +56,8 @@
  */
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
+static LIST_HEAD(xen_irq_list_head);
+
 /* IRQ <-> VIRQ mapping. */
 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
@@ -85,7 +87,9 @@ enum xen_irq_type {
  */
 struct irq_info
 {
+	struct list_head list;
 	enum xen_irq_type type;	/* type */
+	unsigned irq;
 	unsigned short evtchn;	/* event channel */
 	unsigned short cpu;	/* cpu bound */
 
@@ -135,6 +139,7 @@ static void xen_irq_info_common_init(struct irq_info *info,
 	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
 
 	info->type = type;
+	info->irq = irq;
 	info->evtchn = evtchn;
 	info->cpu = cpu;
 
@@ -311,10 +316,11 @@ static void init_evtchn_cpu_bindings(void)
 {
 	int i;
 #ifdef CONFIG_SMP
-	struct irq_desc *desc;
+	struct irq_info *info;
 
 	/* By default all event channels notify CPU#0. */
-	for_each_irq_desc(i, desc) {
+	list_for_each_entry(info, &xen_irq_list_head, list) {
+		struct irq_desc *desc = irq_to_desc(info->irq);
 		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 	}
 #endif
@@ -397,6 +403,21 @@ static void unmask_evtchn(int port)
 	put_cpu();
 }
 
+static void xen_irq_init(unsigned irq)
+{
+	struct irq_info *info;
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	/* By default all event channels notify CPU#0. */
+	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+
+	info = &irq_info[irq];
+
+	info->type = IRQT_UNBOUND;
+
+	list_add_tail(&info->list, &xen_irq_list_head);
+}
+
 static int xen_allocate_irq_dynamic(void)
 {
 	int first = 0;
@@ -426,6 +447,8 @@ retry:
 	if (irq < 0)
 		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
+	xen_irq_init(irq);
+
 	return irq;
 }
 
@@ -444,18 +467,25 @@ static int xen_allocate_irq_gsi(unsigned gsi)
 
 	/* Legacy IRQ descriptors are already allocated by the arch. */
 	if (gsi < NR_IRQS_LEGACY)
-		return gsi;
+		irq = gsi;
+	else
+		irq = irq_alloc_desc_at(gsi, -1);
 
-	irq = irq_alloc_desc_at(gsi, -1);
 	if (irq < 0)
 		panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
 
+	xen_irq_init(irq);
+
 	return irq;
 }
 
 static void xen_free_irq(unsigned irq)
 {
-	irq_info[irq].type = IRQT_UNBOUND;
+	struct irq_info *info = &irq_info[irq];
+
+	info->type = IRQT_UNBOUND;
+
+	list_del(&info->list);
 
 	/* Legacy IRQ descriptors are managed by the arch. */
 	if (irq < NR_IRQS_LEGACY)
@@ -586,16 +616,14 @@ static void ack_pirq(struct irq_data *data)
 
 static int find_irq_by_gsi(unsigned gsi)
 {
-	int irq;
-
-	for (irq = 0; irq < nr_irqs; irq++) {
-		struct irq_info *info = info_for_irq(irq);
+	struct irq_info *info;
 
-		if (info == NULL || info->type != IRQT_PIRQ)
+	list_for_each_entry(info, &xen_irq_list_head, list) {
+		if (info->type != IRQT_PIRQ)
 			continue;
 
-		if (gsi_from_irq(irq) == gsi)
-			return irq;
+		if (info->u.pirq.gsi == gsi)
+			return info->irq;
 	}
 
 	return -1;
@@ -1374,7 +1402,8 @@ void xen_poll_irq(int irq)
 
 void xen_irq_resume(void)
 {
-	unsigned int cpu, irq, evtchn;
+	unsigned int cpu, evtchn;
+	struct irq_info *info;
 
 	init_evtchn_cpu_bindings();
 
@@ -1383,8 +1412,8 @@ void xen_irq_resume(void)
 		mask_evtchn(evtchn);
 
 	/* No IRQ <-> event-channel mappings. */
-	for (irq = 0; irq < nr_irqs; irq++)
-		irq_info[irq].evtchn = 0; /* zap event-channel binding */
+	list_for_each_entry(info, &xen_irq_list_head, list)
+		info->evtchn = 0; /* zap event-channel binding */
 
 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
 		evtchn_to_irq[evtchn] = -1;