path: root/drivers/xen
Diffstat (limited to 'drivers/xen')
-rw-r--r--	drivers/xen/balloon.c		 16
-rw-r--r--	drivers/xen/events.c		342
-rw-r--r--	drivers/xen/manage.c		153
-rw-r--r--	drivers/xen/platform-pci.c	  3
4 files changed, 223 insertions, 291 deletions
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 43f9f02c7db..718050ace08 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -232,7 +232,7 @@ static int increase_reservation(unsigned long nr_pages)
 		set_phys_to_machine(pfn, frame_list[i]);
 
 		/* Link back into the page tables if not highmem. */
-		if (pfn < max_low_pfn) {
+		if (!xen_hvm_domain() && pfn < max_low_pfn) {
 			int ret;
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -280,7 +280,7 @@ static int decrease_reservation(unsigned long nr_pages)
 
 		scrub_page(page);
 
-		if (!PageHighMem(page)) {
+		if (!xen_hvm_domain() && !PageHighMem(page)) {
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
 				__pte_ma(0), 0);
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
 	/* No more mappings: invalidate P2M and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
 		pfn = mfn_to_pfn(frame_list[i]);
-		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 		balloon_append(pfn_to_page(pfn));
 	}
 
@@ -392,15 +392,19 @@ static struct notifier_block xenstore_notifier;
 
 static int __init balloon_init(void)
 {
-	unsigned long pfn, extra_pfn_end;
+	unsigned long pfn, nr_pages, extra_pfn_end;
 	struct page *page;
 
-	if (!xen_pv_domain())
+	if (!xen_domain())
 		return -ENODEV;
 
 	pr_info("xen_balloon: Initialising balloon driver.\n");
 
-	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+	if (xen_pv_domain())
+		nr_pages = xen_start_info->nr_pages;
+	else
+		nr_pages = max_pfn;
+	balloon_stats.current_pages = min(nr_pages, max_pfn);
 	balloon_stats.target_pages = balloon_stats.current_pages;
 	balloon_stats.balloon_low = 0;
 	balloon_stats.balloon_high = 0;
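The balloon_init() hunk above now works for both PV and HVM guests: the initial page count comes from xen_start_info only on PV, otherwise max_pfn is used, and the result is clamped with min(). A minimal standalone sketch of that selection (not kernel code; the domain type and page counts below are made-up stand-ins) is:

/*
 * Hypothetical userspace sketch of the initial balloon accounting above.
 * pv_domain, start_info_nr_pages and max_pfn are illustrative stand-ins.
 */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int pv_domain = 1;				/* pretend we are a PV guest */
	unsigned long start_info_nr_pages = 262144;	/* what xen_start_info would report */
	unsigned long max_pfn = 131072;			/* highest pfn known to the kernel */
	unsigned long nr_pages;

	/* PV guests trust the start info; HVM guests only know max_pfn. */
	nr_pages = pv_domain ? start_info_nr_pages : max_pfn;

	printf("current_pages = %lu\n", min(nr_pages, max_pfn));
	return 0;
}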
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a4160167563..65f5068afd8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -114,7 +114,7 @@ struct cpu_evtchn_s {
 static __initdata struct cpu_evtchn_s init_evtchn_mask = {
 	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
 };
-static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
+static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
 
 static inline unsigned long *cpu_evtchn_mask(int cpu)
 {
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
 	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
 
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		cpumask_copy(desc->affinity, cpumask_of(0));
+		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 	}
 #endif
 
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
 	put_cpu();
 }
 
-static int get_nr_hw_irqs(void)
+static int xen_allocate_irq_dynamic(void)
 {
-	int ret = 1;
+	int first = 0;
+	int irq;
 
 #ifdef CONFIG_X86_IO_APIC
-	ret = get_nr_irqs_gsi();
+	/*
+	 * For an HVM guest or domain 0 which see "real" (emulated or
+	 * actual repectively) GSIs we allocate dynamic IRQs
+	 * e.g. those corresponding to event channels or MSIs
+	 * etc. from the range above those "real" GSIs to avoid
+	 * collisions.
+	 */
+	if (xen_initial_domain() || xen_hvm_domain())
+		first = get_nr_irqs_gsi();
 #endif
 
-	return ret;
-}
+retry:
+	irq = irq_alloc_desc_from(first, -1);
 
-static int find_unbound_pirq(int type)
-{
-	int rc, i;
-	struct physdev_get_free_pirq op_get_free_pirq;
-	op_get_free_pirq.type = type;
+	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
+		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
+		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
+		goto retry;
+	}
 
-	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
-	if (!rc)
-		return op_get_free_pirq.pirq;
+	if (irq < 0)
+		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
-	for (i = 0; i < nr_irqs; i++) {
-		if (pirq_to_irq[i] < 0)
-			return i;
-	}
-	return -1;
+	return irq;
 }
 
-static int find_unbound_irq(void)
+static int xen_allocate_irq_gsi(unsigned gsi)
 {
-	struct irq_data *data;
-	int irq, res;
-	int bottom = get_nr_hw_irqs();
-	int top = nr_irqs-1;
-
-	if (bottom == nr_irqs)
-		goto no_irqs;
+	int irq;
 
-	/* This loop starts from the top of IRQ space and goes down.
-	 * We need this b/c if we have a PCI device in a Xen PV guest
-	 * we do not have an IO-APIC (though the backend might have them)
-	 * mapped in. To not have a collision of physical IRQs with the Xen
-	 * event channels start at the top of the IRQ space for virtual IRQs.
+	/*
+	 * A PV guest has no concept of a GSI (since it has no ACPI
+	 * nor access to/knowledge of the physical APICs). Therefore
+	 * all IRQs are dynamically allocated from the entire IRQ
+	 * space.
 	 */
-	for (irq = top; irq > bottom; irq--) {
-		data = irq_get_irq_data(irq);
-		/* only 15->0 have init'd desc; handle irq > 16 */
-		if (!data)
-			break;
-		if (data->chip == &no_irq_chip)
-			break;
-		if (data->chip != &xen_dynamic_chip)
-			continue;
-		if (irq_info[irq].type == IRQT_UNBOUND)
-			return irq;
-	}
-
-	if (irq == bottom)
-		goto no_irqs;
+	if (xen_pv_domain() && !xen_initial_domain())
+		return xen_allocate_irq_dynamic();
 
-	res = irq_alloc_desc_at(irq, -1);
+	/* Legacy IRQ descriptors are already allocated by the arch. */
+	if (gsi < NR_IRQS_LEGACY)
+		return gsi;
 
-	if (WARN_ON(res != irq))
-		return -1;
+	irq = irq_alloc_desc_at(gsi, -1);
+	if (irq < 0)
+		panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
 
 	return irq;
-
-no_irqs:
-	panic("No available IRQ to bind to: increase nr_irqs!\n");
 }
 
-static bool identity_mapped_irq(unsigned irq)
+static void xen_free_irq(unsigned irq)
 {
-	/* identity map all the hardware irqs */
-	return irq < get_nr_hw_irqs();
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq < NR_IRQS_LEGACY)
+		return;
+
+	irq_free_desc(irq);
 }
 
 static void pirq_unmask_notify(int irq)
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
 	return desc && desc->action == NULL;
 }
 
-static unsigned int startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(unsigned int irq)
 {
 	struct evtchn_bind_pirq bind_pirq;
 	struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
 	return 0;
 }
 
-static void shutdown_pirq(unsigned int irq)
+static unsigned int startup_pirq(struct irq_data *data)
+{
+	return __startup_pirq(data->irq);
+}
+
+static void shutdown_pirq(struct irq_data *data)
 {
 	struct evtchn_close close;
+	unsigned int irq = data->irq;
 	struct irq_info *info = info_for_irq(irq);
 	int evtchn = evtchn_from_irq(irq);
 
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
 	info->evtchn = 0;
 }
 
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
 {
-	startup_pirq(irq);
+	startup_pirq(data);
 }
 
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
 {
 }
 
-static void ack_pirq(unsigned int irq)
+static void ack_pirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
-	move_native_irq(irq);
+	move_native_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn)) {
 		mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
 	}
 }
 
-static void end_pirq(unsigned int irq)
-{
-	int evtchn = evtchn_from_irq(irq);
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (WARN_ON(!desc))
-		return;
-
-	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
-	    (IRQ_DISABLED|IRQ_PENDING)) {
-		shutdown_pirq(irq);
-	} else if (VALID_EVTCHN(evtchn)) {
-		unmask_evtchn(evtchn);
-		pirq_unmask_notify(irq);
-	}
-}
-
 static int find_irq_by_gsi(unsigned gsi)
 {
 	int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 		goto out;	/* XXX need refcount? */
 	}
 
-	/* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
-	 * we are using the !xen_initial_domain() to drop in the function.*/
-	if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
-				xen_pv_domain())) {
-		irq = gsi;
-		irq_alloc_desc_at(irq, -1);
-	} else
-		irq = find_unbound_irq();
+	irq = xen_allocate_irq_gsi(gsi);
 
 	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
 				      handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 	 * this in the priv domain. */
 	if (xen_initial_domain() &&
 	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 		irq = -ENOSPC;
 		goto out;
 	}
@@ -674,87 +644,46 @@ out:
 }
 
 #ifdef CONFIG_PCI_MSI
-#include <linux/msi.h>
-#include "../pci/msi.h"
-
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 {
-	spin_lock(&irq_mapping_update_lock);
-
-	if (alloc & XEN_ALLOC_IRQ) {
-		*irq = find_unbound_irq();
-		if (*irq == -1)
-			goto out;
-	}
-
-	if (alloc & XEN_ALLOC_PIRQ) {
-		*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
-		if (*pirq == -1)
-			goto out;
-	}
+	int rc;
+	struct physdev_get_free_pirq op_get_free_pirq;
 
-	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
-				      handle_level_irq, name);
+	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
+	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
 
-	irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
-	pirq_to_irq[*pirq] = *irq;
+	WARN_ONCE(rc == -ENOSYS,
+		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
 
-out:
-	spin_unlock(&irq_mapping_update_lock);
+	return rc ? -1 : op_get_free_pirq.pirq;
 }
 
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+			     int pirq, int vector, const char *name)
 {
-	int irq = -1;
-	struct physdev_map_pirq map_irq;
-	int rc;
-	int pos;
-	u32 table_offset, bir;
-
-	memset(&map_irq, 0, sizeof(map_irq));
-	map_irq.domid = DOMID_SELF;
-	map_irq.type = MAP_PIRQ_TYPE_MSI;
-	map_irq.index = -1;
-	map_irq.pirq = -1;
-	map_irq.bus = dev->bus->number;
-	map_irq.devfn = dev->devfn;
-
-	if (type == PCI_CAP_ID_MSIX) {
-		pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
-		pci_read_config_dword(dev, msix_table_offset_reg(pos),
-					&table_offset);
-		bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-
-		map_irq.table_base = pci_resource_start(dev, bir);
-		map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
-	}
+	int irq, ret;
 
 	spin_lock(&irq_mapping_update_lock);
 
-	irq = find_unbound_irq();
-
+	irq = xen_allocate_irq_dynamic();
 	if (irq == -1)
 		goto out;
 
-	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
-	if (rc) {
-		printk(KERN_WARNING "xen map irq failed %d\n", rc);
-
-		irq_free_desc(irq);
-
-		irq = -1;
-		goto out;
-	}
-	irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
-
 	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
-			handle_level_irq,
-			(type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
+				      handle_level_irq, name);
 
+	irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
+	pirq_to_irq[pirq] = irq;
+	ret = irq_set_msi_desc(irq, msidesc);
+	if (ret < 0)
+		goto error_irq;
 out:
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
+error_irq:
+	spin_unlock(&irq_mapping_update_lock);
+	xen_free_irq(irq);
+	return -1;
 }
 #endif
 
@@ -779,11 +708,12 @@ int xen_destroy_irq(int irq)
 			printk(KERN_WARNING "unmap irq failed %d\n", rc);
 			goto out;
 		}
-		pirq_to_irq[info->u.pirq.pirq] = -1;
 	}
+	pirq_to_irq[info->u.pirq.pirq] = -1;
+
 	irq_info[irq] = mk_unbound_info();
 
-	irq_free_desc(irq);
+	xen_free_irq(irq);
 
 out:
 	spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +744,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	irq = evtchn_to_irq[evtchn];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_fasteoi_irq, "event");
@@ -839,7 +769,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 		if (irq < 0)
 			goto out;
 
@@ -890,7 +820,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	irq = per_cpu(virq_to_irq, cpu)[virq];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 
 		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
 					      handle_percpu_irq, "virq");
@@ -949,7 +879,7 @@ static void unbind_from_irq(unsigned int irq)
 	if (irq_info[irq].type != IRQT_UNBOUND) {
 		irq_info[irq] = mk_unbound_info();
 
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
@@ -1028,7 +958,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 	if (irq < 0)
 		return irq;
 
-	irqflags |= IRQF_NO_SUSPEND;
+	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
 	if (retval != 0) {
 		unbind_from_irq(irq);
@@ -1272,11 +1202,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	return 0;
 }
 
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+			    bool force)
 {
 	unsigned tcpu = cpumask_first(dest);
 
-	return rebind_irq_to_cpu(irq, tcpu);
+	return rebind_irq_to_cpu(data->irq, tcpu);
 }
 
 int resend_irq_on_evtchn(unsigned int irq)
@@ -1295,35 +1226,35 @@ int resend_irq_on_evtchn(unsigned int irq)
 	return 1;
 }
 
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		unmask_evtchn(evtchn);
 }
 
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		mask_evtchn(evtchn);
 }
 
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
-	move_masked_irq(irq);
+	move_masked_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		unmask_evtchn(evtchn);
 }
 
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int ret = 0;
 
@@ -1372,7 +1303,7 @@ static void restore_cpu_pirqs(void)
 
 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
 
-		startup_pirq(irq);
+		__startup_pirq(irq);
 	}
 }
 
@@ -1483,7 +1414,6 @@ void xen_poll_irq(int irq)
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
-	struct irq_desc *desc;
 
 	init_evtchn_cpu_bindings();
 
@@ -1503,66 +1433,48 @@ void xen_irq_resume(void)
 		restore_cpu_ipis(cpu);
 	}
 
-	/*
-	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
-	 * are not handled by the IRQ core.
-	 */
-	for_each_irq_desc(irq, desc) {
-		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
-			continue;
-		if (desc->status & IRQ_DISABLED)
-			continue;
-
-		evtchn = evtchn_from_irq(irq);
-		if (evtchn == -1)
-			continue;
-
-		unmask_evtchn(evtchn);
-	}
-
 	restore_cpu_pirqs();
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name = "xen-dyn",
 
-	.disable = disable_dynirq,
-	.mask = disable_dynirq,
-	.unmask = enable_dynirq,
+	.irq_disable = disable_dynirq,
+	.irq_mask = disable_dynirq,
+	.irq_unmask = enable_dynirq,
 
-	.eoi = ack_dynirq,
-	.set_affinity = set_affinity_irq,
-	.retrigger = retrigger_dynirq,
+	.irq_eoi = ack_dynirq,
+	.irq_set_affinity = set_affinity_irq,
+	.irq_retrigger = retrigger_dynirq,
 };
 
 static struct irq_chip xen_pirq_chip __read_mostly = {
 	.name = "xen-pirq",
 
-	.startup = startup_pirq,
-	.shutdown = shutdown_pirq,
+	.irq_startup = startup_pirq,
+	.irq_shutdown = shutdown_pirq,
 
-	.enable = enable_pirq,
-	.unmask = enable_pirq,
+	.irq_enable = enable_pirq,
+	.irq_unmask = enable_pirq,
 
-	.disable = disable_pirq,
-	.mask = disable_pirq,
+	.irq_disable = disable_pirq,
+	.irq_mask = disable_pirq,
 
-	.ack = ack_pirq,
-	.end = end_pirq,
+	.irq_ack = ack_pirq,
 
-	.set_affinity = set_affinity_irq,
+	.irq_set_affinity = set_affinity_irq,
 
-	.retrigger = retrigger_dynirq,
+	.irq_retrigger = retrigger_dynirq,
 };
 
 static struct irq_chip xen_percpu_chip __read_mostly = {
 	.name = "xen-percpu",
 
-	.disable = disable_dynirq,
-	.mask = disable_dynirq,
-	.unmask = enable_dynirq,
+	.irq_disable = disable_dynirq,
+	.irq_mask = disable_dynirq,
+	.irq_unmask = enable_dynirq,
 
-	.ack = ack_dynirq,
+	.irq_ack = ack_dynirq,
 };
 
 int xen_set_callback_via(uint64_t via)
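The xen_allocate_irq_dynamic() introduced above allocates IRQ descriptors starting just above the range of "real" (emulated or actual) GSIs and, if that space is exhausted, retries from lower numbers before giving up. A small standalone sketch of that allocation policy follows; it is not kernel code, and fake_irq_alloc_desc_from() plus the constants are stand-ins for irq_alloc_desc_from() and the real GSI count.

/*
 * Hypothetical userspace sketch of the allocation policy used by
 * xen_allocate_irq_dynamic() above.
 */
#include <stdio.h>
#include <errno.h>

#define NR_IRQS		64
#define NR_IRQS_LEGACY	16
#define NR_GSIS		48	/* pretend GSIs occupy 0..47 */

static int irq_in_use[NR_IRQS];

/* Stand-in for irq_alloc_desc_from(): first free descriptor at or above 'from'. */
static int fake_irq_alloc_desc_from(int from)
{
	for (int irq = from; irq < NR_IRQS; irq++) {
		if (!irq_in_use[irq]) {
			irq_in_use[irq] = 1;
			return irq;
		}
	}
	return -ENOMEM;
}

static int allocate_irq_dynamic(void)
{
	int first = NR_GSIS;	/* start above the "real" GSIs */
	int irq;

retry:
	irq = fake_irq_alloc_desc_from(first);
	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
		/* Dynamic space exhausted: eat into GSI space and retry. */
		first = (first - NR_IRQS_LEGACY > NR_IRQS_LEGACY)
			? first - NR_IRQS_LEGACY : NR_IRQS_LEGACY;
		goto retry;
	}
	return irq;	/* the kernel panics instead of returning a failure */
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		printf("allocated irq %d\n", allocate_irq_dynamic());
	return 0;
}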
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 24177272bcb..ebb292859b5 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -34,42 +34,38 @@ enum shutdown_state {
 /* Ignore multiple shutdown requests. */
 static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
 
-#ifdef CONFIG_PM_SLEEP
-static int xen_hvm_suspend(void *data)
-{
-	int err;
-	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-	int *cancelled = data;
-
-	BUG_ON(!irqs_disabled());
-
-	err = sysdev_suspend(PMSG_SUSPEND);
-	if (err) {
-		printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
-			err);
-		return err;
-	}
-
-	*cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+struct suspend_info {
+	int cancelled;
+	unsigned long arg; /* extra hypercall argument */
+	void (*pre)(void);
+	void (*post)(int cancelled);
+};
 
-	xen_hvm_post_suspend(*cancelled);
+static void xen_hvm_post_suspend(int cancelled)
+{
+	xen_arch_hvm_post_suspend(cancelled);
 	gnttab_resume();
+}
 
-	if (!*cancelled) {
-		xen_irq_resume();
-		xen_console_resume();
-		xen_timer_resume();
-	}
-
-	sysdev_resume();
+static void xen_pre_suspend(void)
+{
+	xen_mm_pin_all();
+	gnttab_suspend();
+	xen_arch_pre_suspend();
+}
 
-	return 0;
+static void xen_post_suspend(int cancelled)
+{
+	xen_arch_post_suspend(cancelled);
+	gnttab_resume();
+	xen_mm_unpin_all();
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int xen_suspend(void *data)
 {
+	struct suspend_info *si = data;
 	int err;
-	int *cancelled = data;
 
 	BUG_ON(!irqs_disabled());
 
@@ -80,22 +76,20 @@ static int xen_suspend(void *data)
 		return err;
 	}
 
-	xen_mm_pin_all();
-	gnttab_suspend();
-	xen_pre_suspend();
+	if (si->pre)
+		si->pre();
 
 	/*
 	 * This hypercall returns 1 if suspend was cancelled
 	 * or the domain was merely checkpointed, and 0 if it
 	 * is resuming in a new domain.
 	 */
-	*cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+	si->cancelled = HYPERVISOR_suspend(si->arg);
 
-	xen_post_suspend(*cancelled);
-	gnttab_resume();
-	xen_mm_unpin_all();
+	if (si->post)
+		si->post(si->cancelled);
 
-	if (!*cancelled) {
+	if (!si->cancelled) {
 		xen_irq_resume();
 		xen_console_resume();
 		xen_timer_resume();
@@ -109,7 +103,7 @@ static int xen_suspend(void *data)
 static void do_suspend(void)
 {
 	int err;
-	int cancelled = 1;
+	struct suspend_info si;
 
 	shutting_down = SHUTDOWN_SUSPEND;
 
@@ -139,20 +133,29 @@ static void do_suspend(void)
 		goto out_resume;
 	}
 
-	if (xen_hvm_domain())
-		err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0));
-	else
-		err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
+	si.cancelled = 1;
+
+	if (xen_hvm_domain()) {
+		si.arg = 0UL;
+		si.pre = NULL;
+		si.post = &xen_hvm_post_suspend;
+	} else {
+		si.arg = virt_to_mfn(xen_start_info);
+		si.pre = &xen_pre_suspend;
+		si.post = &xen_post_suspend;
+	}
+
+	err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
 	dpm_resume_noirq(PMSG_RESUME);
 
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
-		cancelled = 1;
+		si.cancelled = 1;
 	}
 
 out_resume:
-	if (!cancelled) {
+	if (!si.cancelled) {
 		xen_arch_resume();
 		xs_resume();
 	} else
@@ -172,12 +175,39 @@ out:
 }
 #endif /* CONFIG_PM_SLEEP */
 
+struct shutdown_handler {
+	const char *command;
+	void (*cb)(void);
+};
+
+static void do_poweroff(void)
+{
+	shutting_down = SHUTDOWN_POWEROFF;
+	orderly_poweroff(false);
+}
+
+static void do_reboot(void)
+{
+	shutting_down = SHUTDOWN_POWEROFF; /* ? */
+	ctrl_alt_del();
+}
+
 static void shutdown_handler(struct xenbus_watch *watch,
 			     const char **vec, unsigned int len)
 {
 	char *str;
 	struct xenbus_transaction xbt;
 	int err;
+	static struct shutdown_handler handlers[] = {
+		{ "poweroff", do_poweroff },
+		{ "halt", do_poweroff },
+		{ "reboot", do_reboot },
+#ifdef CONFIG_PM_SLEEP
+		{ "suspend", do_suspend },
+#endif
+		{NULL, NULL},
+	};
+	static struct shutdown_handler *handler;
 
 	if (shutting_down != SHUTDOWN_INVALID)
 		return;
@@ -194,7 +224,14 @@ static void shutdown_handler(struct xenbus_watch *watch,
 		return;
 	}
 
-	xenbus_write(xbt, "control", "shutdown", "");
+	for (handler = &handlers[0]; handler->command; handler++) {
+		if (strcmp(str, handler->command) == 0)
+			break;
+	}
+
+	/* Only acknowledge commands which we are prepared to handle. */
+	if (handler->cb)
+		xenbus_write(xbt, "control", "shutdown", "");
 
 	err = xenbus_transaction_end(xbt, 0);
 	if (err == -EAGAIN) {
@@ -202,17 +239,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
 		goto again;
 	}
 
-	if (strcmp(str, "poweroff") == 0 ||
-	    strcmp(str, "halt") == 0) {
-		shutting_down = SHUTDOWN_POWEROFF;
-		orderly_poweroff(false);
-	} else if (strcmp(str, "reboot") == 0) {
-		shutting_down = SHUTDOWN_POWEROFF; /* ? */
-		ctrl_alt_del();
-#ifdef CONFIG_PM_SLEEP
-	} else if (strcmp(str, "suspend") == 0) {
-		do_suspend();
-#endif
+	if (handler->cb) {
+		handler->cb();
 	} else {
 		printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
 		shutting_down = SHUTDOWN_INVALID;
@@ -291,27 +319,18 @@ static int shutdown_event(struct notifier_block *notifier,
 	return NOTIFY_DONE;
 }
 
-static int __init __setup_shutdown_event(void)
-{
-	/* Delay initialization in the PV on HVM case */
-	if (xen_hvm_domain())
-		return 0;
-
-	if (!xen_pv_domain())
-		return -ENODEV;
-
-	return xen_setup_shutdown_event();
-}
-
 int xen_setup_shutdown_event(void)
 {
 	static struct notifier_block xenstore_notifier = {
 		.notifier_call = shutdown_event
 	};
+
+	if (!xen_domain())
+		return -ENODEV;
 	register_xenstore_notifier(&xenstore_notifier);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_setup_shutdown_event);
 
-subsys_initcall(__setup_shutdown_event);
+subsys_initcall(xen_setup_shutdown_event);
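The manage.c changes above fold the separate PV and HVM suspend callbacks into one xen_suspend() parameterised by a struct suspend_info: an extra hypercall argument plus optional pre/post hooks. A minimal standalone sketch of that callback-struct pattern follows; it is not the kernel code, the hypercall and the Xen hooks are stubbed, and only the control flow mirrors the patch.

/*
 * Hypothetical userspace sketch of the suspend_info pattern above.
 */
#include <stdio.h>
#include <stddef.h>

struct suspend_info {
	int cancelled;
	unsigned long arg;		/* extra hypercall argument */
	void (*pre)(void);
	void (*post)(int cancelled);
};

static void pv_pre(void)            { printf("pv: pin page tables, suspend grant tables\n"); }
static void pv_post(int cancelled)  { printf("pv: post-suspend, cancelled=%d\n", cancelled); }
static void hvm_post(int cancelled) { printf("hvm: post-suspend, cancelled=%d\n", cancelled); }

/* Stand-in for HYPERVISOR_suspend(): 1 = cancelled/checkpointed, 0 = resumed. */
static int fake_hypervisor_suspend(unsigned long arg)
{
	(void)arg;
	return 1;
}

static int suspend_core(struct suspend_info *si)
{
	if (si->pre)
		si->pre();

	si->cancelled = fake_hypervisor_suspend(si->arg);

	if (si->post)
		si->post(si->cancelled);

	return 0;
}

int main(void)
{
	struct suspend_info pv  = { .arg = 0x1234, .pre = pv_pre, .post = pv_post  };
	struct suspend_info hvm = { .arg = 0UL,    .pre = NULL,   .post = hvm_post };

	suspend_core(&pv);
	suspend_core(&hvm);
	return 0;
}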
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index afbe041f42c..319dd0a94d5 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -156,9 +156,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
 	if (ret)
 		goto out;
 	xenbus_probe(NULL);
-	ret = xen_setup_shutdown_event();
-	if (ret)
-		goto out;
 	return 0;
 
 out: