-rw-r--r--  arch/x86/include/asm/xen/pci.h    8
-rw-r--r--  arch/x86/pci/xen.c               35
-rw-r--r--  arch/x86/xen/time.c               4
-rw-r--r--  drivers/pci/xen-pcifront.c       31
-rw-r--r--  drivers/xen/events.c            278
-rw-r--r--  include/linux/interrupt.h         3
-rw-r--r--  kernel/irq/manage.c              11
-rw-r--r--  kernel/irq/pm.c                   3
8 files changed, 186 insertions, 187 deletions
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 2329b3eaf8d3..aa8620989162 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
  * its own functions.
  */
 struct xen_pci_frontend_ops {
-	int (*enable_msi)(struct pci_dev *dev, int **vectors);
+	int (*enable_msi)(struct pci_dev *dev, int vectors[]);
 	void (*disable_msi)(struct pci_dev *dev);
-	int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+	int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
 	void (*disable_msix)(struct pci_dev *dev);
 };
 
 extern struct xen_pci_frontend_ops *xen_pci_frontend;
 
 static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
-					      int **vectors)
+					      int vectors[])
 {
 	if (xen_pci_frontend && xen_pci_frontend->enable_msi)
 		return xen_pci_frontend->enable_msi(dev, vectors);
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
 		xen_pci_frontend->disable_msi(dev);
 }
 static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
-					       int **vectors, int nvec)
+					       int vectors[], int nvec)
 {
 	if (xen_pci_frontend && xen_pci_frontend->enable_msix)
 		return xen_pci_frontend->enable_msix(dev, vectors, nvec);
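
[Review note] The prototype change above drops a level of indirection: the
frontend now fills a caller-owned int array rather than writing through an
int **, and the call sites in arch/x86/pci/xen.c drop the extra '&'
accordingly. A minimal sketch of a caller under the new signature (names and
sizes illustrative, not from this patch):

	int v[8];	/* one slot per requested vector */
	int ret;

	ret = xen_pci_frontend_enable_msix(dev, v, 8);
	if (!ret)
		pr_info("first MSI-X vector: %d\n", v[0]);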
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 25cd4a07d09f..8634e1b49c03 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -150,21 +150,21 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 		return -ENOMEM;
 
 	if (type == PCI_CAP_ID_MSIX)
-		ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
 	else
-		ret = xen_pci_frontend_enable_msi(dev, &v);
+		ret = xen_pci_frontend_enable_msi(dev, v);
 	if (ret)
 		goto error;
 	i = 0;
 	list_for_each_entry(msidesc, &dev->msi_list, list) {
-		irq = xen_allocate_pirq(v[i], 0, /* not sharable */
-			(type == PCI_CAP_ID_MSIX) ?
-			"pcifront-msi-x" : "pcifront-msi");
+		xen_allocate_pirq_msi(
+			(type == PCI_CAP_ID_MSIX) ?
+			"pcifront-msi-x" : "pcifront-msi",
+			&irq, &v[i], XEN_ALLOC_IRQ);
 		if (irq < 0) {
 			ret = -1;
 			goto free;
 		}
-
 		ret = set_irq_msi(irq, msidesc);
 		if (ret)
 			goto error_while;
@@ -193,6 +193,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
 		xen_pci_frontend_disable_msix(dev);
 	else
 		xen_pci_frontend_disable_msi(dev);
+
+	/* Free the IRQs and the msidesc using the generic code. */
+	default_teardown_msi_irqs(dev);
 }
 
 static void xen_teardown_msi_irq(unsigned int irq)
@@ -226,21 +229,27 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
 {
 	int rc;
 	int share = 1;
+	u8 gsi;
 
-	dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
-
-	if (dev->irq < 0)
-		return -EINVAL;
+	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+	if (rc < 0) {
+		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+			 rc);
+		return rc;
+	}
 
-	if (dev->irq < NR_IRQS_LEGACY)
+	if (gsi < NR_IRQS_LEGACY)
 		share = 0;
 
-	rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+	rc = xen_allocate_pirq(gsi, share, "pcifront");
 	if (rc < 0) {
-		dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
-			 dev->irq, rc);
+		dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
+			 gsi, rc);
 		return rc;
 	}
+
+	dev->irq = rc;
+	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
 	return 0;
 }
 
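
[Review note] After this change the GSI read from PCI_INTERRUPT_LINE and the
Linux IRQ returned by xen_allocate_pirq() are no longer assumed to be the
same number; the mapping result is written back into dev->irq, so drivers
are unaffected. Illustrative consumer (hypothetical handler name, not from
this patch):

	/* dev->irq now holds the mapped IRQ, not the raw GSI. */
	if (request_irq(dev->irq, my_handler, IRQF_SHARED, "mydev", dev))
		dev_err(&dev->dev, "failed to bind IRQ %d\n", dev->irq);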
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 067759e3d6a5..2e2d370a47b1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu)
 		name = "<timer kasprintf failed>";
 
 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+				      IRQF_DISABLED|IRQF_PERCPU|
+				      IRQF_NOBALANCING|IRQF_TIMER|
+				      IRQF_FORCE_RESUME,
 				      name, NULL);
 
 	evt = &per_cpu(xen_clock_events, cpu);
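
[Review note] IRQF_TIMER already implies IRQF_NO_SUSPEND (see the
include/linux/interrupt.h hunk below), so the IRQ core leaves the timer VIRQ
enabled across suspend; the new IRQF_FORCE_RESUME bit additionally forces
__enable_irq() to bring it back up on resume. A hedged sketch of the same
flag combination for some other per-cpu VIRQ (handler and names
illustrative):

	irq = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_handler,
				      IRQF_PERCPU | IRQF_NOBALANCING |
				      IRQF_NO_SUSPEND | IRQF_FORCE_RESUME,
				      "my-virq", NULL);
	if (irq < 0)
		pr_err("VIRQ bind failed: %d\n", irq);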
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 3a5a6fcc0ead..492b7d807fe8 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = {
 
 #ifdef CONFIG_PCI_MSI
 static int pci_frontend_enable_msix(struct pci_dev *dev,
-				    int **vector, int nvec)
+				    int vector[], int nvec)
 {
 	int err;
 	int i;
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
 	if (likely(!err)) {
 		if (likely(!op.value)) {
 			/* we get the result */
-			for (i = 0; i < nvec; i++)
-				*(*vector+i) = op.msix_entries[i].vector;
-			return 0;
+			for (i = 0; i < nvec; i++) {
+				if (op.msix_entries[i].vector <= 0) {
+					dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+						 i, op.msix_entries[i].vector);
+					err = -EINVAL;
+					vector[i] = -1;
+					continue;
+				}
+				vector[i] = op.msix_entries[i].vector;
+			}
 		} else {
 			printk(KERN_DEBUG "enable msix get value %x\n",
 				op.value);
-			return op.value;
 		}
 	} else {
 		dev_err(&dev->dev, "enable msix get err %x\n", err);
-		return err;
 	}
+	return err;
 }
 
 static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
 		dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
 }
 
-static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
 {
 	int err;
 	struct xen_pci_op op = {
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
 
 	err = do_pci_op(pdev, &op);
 	if (likely(!err)) {
-		*(*vector) = op.value;
+		vector[0] = op.value;
+		if (op.value <= 0) {
+			dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+				 op.value);
+			err = -EINVAL;
+			vector[0] = -1;
+		}
 	} else {
 		dev_err(&dev->dev, "pci frontend enable msi failed for dev "
 				"%x:%x\n", op.bus, op.devfn);
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev)
 
 	pcifront_free_roots(pdev);
 
-	/*For PCIE_AER error handling job*/
-	flush_scheduled_work();
+	cancel_work_sync(&pdev->op_work);
 
 	if (pdev->irq >= 0)
 		unbind_from_irqhandler(pdev->irq, pdev);
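
[Review note] cancel_work_sync(&pdev->op_work) quiesces only this device's
AER work item, whereas flush_scheduled_work() drained the whole global
workqueue (and can deadlock if teardown is itself reachable from a work
item). The usual pairing, sketched (assuming op_work is initialized
elsewhere in this file, e.g. with the driver's AER work handler):

	INIT_WORK(&pdev->op_work, pcifront_do_aer);	/* at setup */
	...
	cancel_work_sync(&pdev->op_work);		/* at teardown */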
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 74681478100a..89987a7bf26f 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
 	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
 
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		cpumask_copy(desc->affinity, cpumask_of(0));
+		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 	}
 #endif
 
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
 	put_cpu();
 }
 
-static int get_nr_hw_irqs(void)
+static int xen_allocate_irq_dynamic(void)
 {
-	int ret = 1;
+	int first = 0;
+	int irq;
 
 #ifdef CONFIG_X86_IO_APIC
-	ret = get_nr_irqs_gsi();
+	/*
+	 * For an HVM guest or domain 0 which see "real" (emulated or
+	 * actual respectively) GSIs we allocate dynamic IRQs
+	 * e.g. those corresponding to event channels or MSIs
+	 * etc. from the range above those "real" GSIs to avoid
+	 * collisions.
+	 */
+	if (xen_initial_domain() || xen_hvm_domain())
+		first = get_nr_irqs_gsi();
 #endif
 
-	return ret;
-}
+retry:
+	irq = irq_alloc_desc_from(first, -1);
 
-static int find_unbound_pirq(int type)
-{
-	int rc, i;
-	struct physdev_get_free_pirq op_get_free_pirq;
-	op_get_free_pirq.type = type;
+	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
+		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
+		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
+		goto retry;
+	}
 
-	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
-	if (!rc)
-		return op_get_free_pirq.pirq;
+	if (irq < 0)
+		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
-	for (i = 0; i < nr_irqs; i++) {
-		if (pirq_to_irq[i] < 0)
-			return i;
-	}
-	return -1;
+	return irq;
 }
 
-static int find_unbound_irq(void)
+static int xen_allocate_irq_gsi(unsigned gsi)
 {
-	struct irq_data *data;
-	int irq, res;
-	int bottom = get_nr_hw_irqs();
-	int top = nr_irqs-1;
-
-	if (bottom == nr_irqs)
-		goto no_irqs;
+	int irq;
 
-	/* This loop starts from the top of IRQ space and goes down.
-	 * We need this b/c if we have a PCI device in a Xen PV guest
-	 * we do not have an IO-APIC (though the backend might have them)
-	 * mapped in. To not have a collision of physical IRQs with the Xen
-	 * event channels start at the top of the IRQ space for virtual IRQs.
+	/*
+	 * A PV guest has no concept of a GSI (since it has no ACPI
+	 * nor access to/knowledge of the physical APICs). Therefore
+	 * all IRQs are dynamically allocated from the entire IRQ
+	 * space.
 	 */
-	for (irq = top; irq > bottom; irq--) {
-		data = irq_get_irq_data(irq);
-		/* only 15->0 have init'd desc; handle irq > 16 */
-		if (!data)
-			break;
-		if (data->chip == &no_irq_chip)
-			break;
-		if (data->chip != &xen_dynamic_chip)
-			continue;
-		if (irq_info[irq].type == IRQT_UNBOUND)
-			return irq;
-	}
-
-	if (irq == bottom)
-		goto no_irqs;
+	if (xen_pv_domain() && !xen_initial_domain())
+		return xen_allocate_irq_dynamic();
 
-	res = irq_alloc_desc_at(irq, -1);
+	/* Legacy IRQ descriptors are already allocated by the arch. */
+	if (gsi < NR_IRQS_LEGACY)
+		return gsi;
 
-	if (WARN_ON(res != irq))
-		return -1;
+	irq = irq_alloc_desc_at(gsi, -1);
+	if (irq < 0)
+		panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
 
 	return irq;
-
-no_irqs:
-	panic("No available IRQ to bind to: increase nr_irqs!\n");
 }
 
-static bool identity_mapped_irq(unsigned irq)
+static void xen_free_irq(unsigned irq)
 {
-	/* identity map all the hardware irqs */
-	return irq < get_nr_hw_irqs();
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq < NR_IRQS_LEGACY)
+		return;
+
+	irq_free_desc(irq);
 }
 
 static void pirq_unmask_notify(int irq)
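
[Review note] xen_allocate_irq_dynamic() builds on irq_alloc_desc_from(first,
node), which returns the lowest free IRQ number >= first, or a negative
errno; the retry path only eats into the GSI range once the space above it
is exhausted. Condensed sketch of the allocator call (illustrative values):

	int irq = irq_alloc_desc_from(64, -1);	/* first = 64, any NUMA node */

	if (irq < 0)
		pr_err("no free IRQ descriptor: %d\n", irq);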
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
 	return desc && desc->action == NULL;
 }
 
-static unsigned int startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(unsigned int irq)
 {
 	struct evtchn_bind_pirq bind_pirq;
 	struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
 	return 0;
 }
 
-static void shutdown_pirq(unsigned int irq)
+static unsigned int startup_pirq(struct irq_data *data)
+{
+	return __startup_pirq(data->irq);
+}
+
+static void shutdown_pirq(struct irq_data *data)
 {
 	struct evtchn_close close;
+	unsigned int irq = data->irq;
 	struct irq_info *info = info_for_irq(irq);
 	int evtchn = evtchn_from_irq(irq);
 
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
 	info->evtchn = 0;
 }
 
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
 {
-	startup_pirq(irq);
+	startup_pirq(data);
 }
 
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
 {
 }
 
-static void ack_pirq(unsigned int irq)
+static void ack_pirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
-	move_native_irq(irq);
+	move_native_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn)) {
 		mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
 	}
 }
 
-static void end_pirq(unsigned int irq)
-{
-	int evtchn = evtchn_from_irq(irq);
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (WARN_ON(!desc))
-		return;
-
-	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
-	    (IRQ_DISABLED|IRQ_PENDING)) {
-		shutdown_pirq(irq);
-	} else if (VALID_EVTCHN(evtchn)) {
-		unmask_evtchn(evtchn);
-		pirq_unmask_notify(irq);
-	}
-}
-
 static int find_irq_by_gsi(unsigned gsi)
 {
 	int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 		goto out;	/* XXX need refcount? */
 	}
 
-	/* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
-	 * we are using the !xen_initial_domain() to drop in the function.*/
-	if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
-				xen_pv_domain())) {
-		irq = gsi;
-		irq_alloc_desc_at(irq, -1);
-	} else
-		irq = find_unbound_irq();
+	irq = xen_allocate_irq_gsi(gsi);
 
 	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
 				      handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 	 * this in the priv domain. */
 	if (xen_initial_domain() &&
 	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 		irq = -ENOSPC;
 		goto out;
 	}
@@ -677,12 +647,29 @@ out:
 #include <linux/msi.h>
 #include "../pci/msi.h"
 
+static int find_unbound_pirq(int type)
+{
+	int rc, i;
+	struct physdev_get_free_pirq op_get_free_pirq;
+	op_get_free_pirq.type = type;
+
+	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
+	if (!rc)
+		return op_get_free_pirq.pirq;
+
+	for (i = 0; i < nr_irqs; i++) {
+		if (pirq_to_irq[i] < 0)
+			return i;
+	}
+	return -1;
+}
+
 void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
 {
 	spin_lock(&irq_mapping_update_lock);
 
 	if (alloc & XEN_ALLOC_IRQ) {
-		*irq = find_unbound_irq();
+		*irq = xen_allocate_irq_dynamic();
 		if (*irq == -1)
 			goto out;
 	}
@@ -732,7 +719,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	irq = find_unbound_irq();
+	irq = xen_allocate_irq_dynamic();
 
 	if (irq == -1)
 		goto out;
@@ -741,7 +728,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
 	if (rc) {
 		printk(KERN_WARNING "xen map irq failed %d\n", rc);
 
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 
 		irq = -1;
 		goto out;
@@ -779,11 +766,12 @@ int xen_destroy_irq(int irq)
 		printk(KERN_WARNING "unmap irq failed %d\n", rc);
 		goto out;
 	}
-		pirq_to_irq[info->u.pirq.pirq] = -1;
 	}
+	pirq_to_irq[info->u.pirq.pirq] = -1;
+
 	irq_info[irq] = mk_unbound_info();
 
-	irq_free_desc(irq);
+	xen_free_irq(irq);
 
 out:
 	spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	irq = evtchn_to_irq[evtchn];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_fasteoi_irq, "event");
@@ -839,7 +827,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 		if (irq < 0)
 			goto out;
 
@@ -875,7 +863,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	irq = per_cpu(virq_to_irq, cpu)[virq];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 
 		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
 					      handle_percpu_irq, "virq");
@@ -934,7 +922,7 @@ static void unbind_from_irq(unsigned int irq)
 	if (irq_info[irq].type != IRQT_UNBOUND) {
 		irq_info[irq] = mk_unbound_info();
 
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
@@ -990,7 +978,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 	if (irq < 0)
 		return irq;
 
-	irqflags |= IRQF_NO_SUSPEND;
+	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
 	if (retval != 0) {
 		unbind_from_irq(irq);
@@ -1234,11 +1222,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	return 0;
 }
 
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+			    bool force)
 {
 	unsigned tcpu = cpumask_first(dest);
 
-	return rebind_irq_to_cpu(irq, tcpu);
+	return rebind_irq_to_cpu(data->irq, tcpu);
 }
 
 int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1246,35 @@ int resend_irq_on_evtchn(unsigned int irq)
 	return 1;
 }
 
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		unmask_evtchn(evtchn);
 }
 
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		mask_evtchn(evtchn);
 }
 
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
-	move_masked_irq(irq);
+	move_masked_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		unmask_evtchn(evtchn);
 }
 
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int ret = 0;
 
@@ -1334,7 +1323,7 @@ static void restore_cpu_pirqs(void)
 
 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
 
-		startup_pirq(irq);
+		__startup_pirq(irq);
 	}
 }
 
@@ -1445,7 +1434,6 @@ void xen_poll_irq(int irq)
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
-	struct irq_desc *desc;
 
 	init_evtchn_cpu_bindings();
 
@@ -1465,66 +1453,48 @@ void xen_irq_resume(void)
 		restore_cpu_ipis(cpu);
 	}
 
-	/*
-	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
-	 * are not handled by the IRQ core.
-	 */
-	for_each_irq_desc(irq, desc) {
-		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
-			continue;
-		if (desc->status & IRQ_DISABLED)
-			continue;
-
-		evtchn = evtchn_from_irq(irq);
-		if (evtchn == -1)
-			continue;
-
-		unmask_evtchn(evtchn);
-	}
-
 	restore_cpu_pirqs();
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name			= "xen-dyn",
 
-	.disable		= disable_dynirq,
-	.mask			= disable_dynirq,
-	.unmask			= enable_dynirq,
+	.irq_disable		= disable_dynirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
 
-	.eoi			= ack_dynirq,
-	.set_affinity		= set_affinity_irq,
-	.retrigger		= retrigger_dynirq,
+	.irq_eoi		= ack_dynirq,
+	.irq_set_affinity	= set_affinity_irq,
+	.irq_retrigger		= retrigger_dynirq,
 };
 
 static struct irq_chip xen_pirq_chip __read_mostly = {
 	.name			= "xen-pirq",
 
-	.startup		= startup_pirq,
-	.shutdown		= shutdown_pirq,
+	.irq_startup		= startup_pirq,
+	.irq_shutdown		= shutdown_pirq,
 
-	.enable			= enable_pirq,
-	.unmask			= enable_pirq,
+	.irq_enable		= enable_pirq,
+	.irq_unmask		= enable_pirq,
 
-	.disable		= disable_pirq,
-	.mask			= disable_pirq,
+	.irq_disable		= disable_pirq,
+	.irq_mask		= disable_pirq,
 
-	.ack			= ack_pirq,
-	.end			= end_pirq,
+	.irq_ack		= ack_pirq,
 
-	.set_affinity		= set_affinity_irq,
+	.irq_set_affinity	= set_affinity_irq,
 
-	.retrigger		= retrigger_dynirq,
+	.irq_retrigger		= retrigger_dynirq,
 };
 
 static struct irq_chip xen_percpu_chip __read_mostly = {
 	.name			= "xen-percpu",
 
-	.disable		= disable_dynirq,
-	.mask			= disable_dynirq,
-	.unmask			= enable_dynirq,
+	.irq_disable		= disable_dynirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
 
-	.ack			= ack_dynirq,
+	.irq_ack		= ack_dynirq,
 };
 
 int xen_set_callback_via(uint64_t via)
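
[Review note] The irq_chip changes above follow the generic-IRQ migration
from 'unsigned int irq' callbacks (.mask, .unmask, ...) to the struct
irq_data based .irq_* variants; each callback recovers the IRQ number via
data->irq. Minimal sketch of a new-style chip (illustrative, not part of the
patch):

	static void my_mask(struct irq_data *data)
	{
		mask_evtchn(evtchn_from_irq(data->irq));
	}

	static struct irq_chip my_chip = {
		.name		= "my-chip",
		.irq_mask	= my_mask,
		/* .irq_unmask, .irq_ack, ... filled in the same style */
	};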
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..d746da19c6a2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -55,7 +55,7 @@
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -67,6 +67,7 @@
 #define IRQF_IRQPOLL		0x00001000
 #define IRQF_ONESHOT		0x00002000
 #define IRQF_NO_SUSPEND		0x00004000
+#define IRQF_FORCE_RESUME	0x00008000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
 
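
[Review note] IRQF_FORCE_RESUME is only meaningful together with
IRQF_NO_SUSPEND: the line stays enabled across suspend yet still gets an
explicit re-enable in the resume path. Illustrative request (hypothetical
handler and cookie names):

	err = request_irq(irq, my_handler,
			  IRQF_NO_SUSPEND | IRQF_FORCE_RESUME,
			  "my-dev", my_cookie);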
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9033c1c70828..2782bacdf494 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,8 +282,17 @@ EXPORT_SYMBOL(disable_irq);
 
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
-	if (resume)
+	if (resume) {
+		if (!(desc->status & IRQ_SUSPENDED)) {
+			if (!desc->action)
+				return;
+			if (!(desc->action->flags & IRQF_FORCE_RESUME))
+				return;
+			/* Pretend that it got disabled ! */
+			desc->depth++;
+		}
 		desc->status &= ~IRQ_SUSPENDED;
+	}
 
 	switch (desc->depth) {
 	case 0:
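
[Review note] For an IRQF_FORCE_RESUME line that was never marked
IRQ_SUSPENDED (IRQF_NO_SUSPEND kept it out of suspend_device_irqs()), the new
branch bumps desc->depth so the switch below behaves as if the line had been
disabled once. Roughly, at resume with depth == 0 and IRQF_FORCE_RESUME set:

	desc->depth++;		/* pretend it was disabled -> depth == 1 */
	/* case 1: clears IRQ_DISABLED, resends a pending IRQ if needed,
	 * then desc->depth-- brings the depth back to 0 (enabled). */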
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 0d4005d85b03..d6bfb89cce91 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -53,9 +53,6 @@ void resume_device_irqs(void)
 	for_each_irq_desc(irq, desc) {
 		unsigned long flags;
 
-		if (!(desc->status & IRQ_SUSPENDED))
-			continue;
-
 		raw_spin_lock_irqsave(&desc->lock, flags);
 		__enable_irq(desc, irq, true);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);