author		Ingo Molnar <mingo@elte.hu>	2009-06-11 11:55:42 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-11 11:55:42 -0400
commit		940010c5a314a7bd9b498593bc6ba1718ac5aec5 (patch)
tree		d141e08ced08c40c6a8e3ab2cdecde5ff14e560f /drivers
parent		8dc8e5e8bc0ce00b0f656bf972f67cd8a72759e5 (diff)
parent		991ec02cdca33b03a132a0cacfe6f0aa0be9aa8d (diff)
Merge branch 'linus' into perfcounters/core
Conflicts:
arch/x86/kernel/irqinit.c
arch/x86/kernel/irqinit_64.c
arch/x86/kernel/traps.c
arch/x86/mm/fault.c
include/linux/sched.h
kernel/exit.c
Diffstat (limited to 'drivers')
43 files changed, 1324 insertions, 214 deletions
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 51b9f8280f88..2faa9e2ac893 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,7 +401,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 	/* Interrupt Line values above 0xF are forbidden */
 	if (dev->irq > 0 && (dev->irq <= 0xF)) {
 		printk(" - using IRQ %d\n", dev->irq);
-		acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE,
+		acpi_register_gsi(&dev->dev, dev->irq,
+				  ACPI_LEVEL_SENSITIVE,
 				  ACPI_ACTIVE_LOW);
 		return 0;
 	} else {
@@ -410,7 +411,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		}
 	}
 
-	rc = acpi_register_gsi(gsi, triggering, polarity);
+	rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
 	if (rc < 0) {
 		dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
 			 pin_name(pin));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 45ad3288c5ff..23f0fb84f1c1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
 	if (!pr)
 		return -ENOMEM;
 
-	if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
 		kfree(pr);
 		return -ENOMEM;
 	}
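Note: both this hunk and the cpufreq hunk further down swap alloc_cpumask_var() for zalloc_cpumask_var(). A minimal sketch of the difference, not part of the patch (the helper name is hypothetical; the cpumask calls are the real kernel API):

	/*
	 * Illustrative sketch only: zalloc_cpumask_var() behaves like
	 * alloc_cpumask_var() followed by cpumask_clear(), so callers that
	 * need a zeroed mask no longer have to clear it themselves.
	 */
	#include <linux/cpumask.h>
	#include <linux/slab.h>

	static int example_alloc_mask(cpumask_var_t *mask)	/* hypothetical helper */
	{
		if (!zalloc_cpumask_var(mask, GFP_KERNEL))	/* allocated and zeroed */
			return -ENOMEM;
		/* equivalent open-coded form:
		 *	if (!alloc_cpumask_var(mask, GFP_KERNEL))
		 *		return -ENOMEM;
		 *	cpumask_clear(*mask);
		 */
		return 0;
	}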
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 08186ecbaf8d..6b91c26a4635 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -220,6 +220,7 @@ enum {
 	AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
 	AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
 	AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+	AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
 
 	/* ap->flags bits */
 
@@ -2316,9 +2317,17 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 ctl;
 
+	if (mesg.event & PM_EVENT_SUSPEND &&
+	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "BIOS update required for suspend/resume\n");
+		return -EIO;
+	}
+
 	if (mesg.event & PM_EVENT_SLEEP) {
 		/* AHCI spec rev1.1 section 8.3.3:
 		 * Software must disable interrupts prior to requesting a
@@ -2610,6 +2619,63 @@ static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
 	return false;
 }
 
+static bool ahci_broken_suspend(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
+		 * to the harddisk doesn't become online after
+		 * resuming from STR. Warn and fail suspend.
+		 */
+		{
+			.ident = "dv4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv4 Notebook PC"),
+			},
+			.driver_data = "F.30", /* cutoff BIOS version */
+		},
+		{
+			.ident = "dv5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv5 Notebook PC"),
+			},
+			.driver_data = "F.16", /* cutoff BIOS version */
+		},
+		{
+			.ident = "dv6",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv6 Notebook PC"),
+			},
+			.driver_data = "F.21", /* cutoff BIOS version */
+		},
+		{
+			.ident = "HDX18",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP HDX18 Notebook PC"),
+			},
+			.driver_data = "F.23", /* cutoff BIOS version */
+		},
+		{ } /* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	const char *ver;
+
+	if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
+		return false;
+
+	ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+	return !ver || strcmp(ver, dmi->driver_data) < 0;
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -2715,6 +2781,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   "quirky BIOS, skipping spindown on poweroff\n");
 	}
 
+	if (ahci_broken_suspend(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "BIOS update required for suspend/resume\n");
+	}
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
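Note: the ahci_broken_suspend() helper added above is an instance of the usual DMI quirk pattern: match the machine, stash the first fixed BIOS version in driver_data, and treat an older (or unreadable) version as affected. A condensed, hypothetical sketch of that pattern, not taken from this patch (table contents and function name are illustrative; the dmi_* calls are the real API):

	#include <linux/dmi.h>
	#include <linux/string.h>

	/* Illustrative table only, not from this patch. */
	static const struct dmi_system_id example_sysids[] = {
		{
			.ident = "Example notebook",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
			},
			.driver_data = "F.30",	/* first BIOS version with the fix */
		},
		{ }	/* terminate list */
	};

	static bool example_platform_is_affected(void)
	{
		const struct dmi_system_id *dmi = dmi_first_match(example_sysids);
		const char *ver;

		if (!dmi)
			return false;		/* machine not in the table */
		ver = dmi_get_system_info(DMI_BIOS_VERSION);
		/* no version string, or version older than the cutoff: affected */
		return !ver || strcmp(ver, dmi->driver_data) < 0;
	}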
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d51a17c0f59b..1aeb7082b0c4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1455,6 +1455,15 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
 		/* PCI slot number of the controller */
 		.driver_data = (void *)0x1FUL,
 	},
+	{
+		.ident = "HP Compaq nc6000",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
+		},
+		/* PCI slot number of the controller */
+		.driver_data = (void *)0x1FUL,
+	},
 
 	{ } /* terminate list */
 };
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 751b7ea4816c..fc9c5d6d7d80 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -497,14 +497,16 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	/* Revision 0x20 added DMA */
 	static const struct ata_port_info info_20 = {
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+			 ATA_FLAG_IGN_SIMPLEX,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.port_ops = &ali_20_port_ops
 	};
 	/* Revision 0x20 with support logic added UDMA */
 	static const struct ata_port_info info_20_udma = {
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+			 ATA_FLAG_IGN_SIMPLEX,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.udma_mask = ATA_UDMA2,
@@ -512,7 +514,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	/* Revision 0xC2 adds UDMA66 */
 	static const struct ata_port_info info_c2 = {
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+			 ATA_FLAG_IGN_SIMPLEX,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.udma_mask = ATA_UDMA4,
@@ -520,7 +523,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	/* Revision 0xC3 is UDMA66 for now */
 	static const struct ata_port_info info_c3 = {
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+			 ATA_FLAG_IGN_SIMPLEX,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.udma_mask = ATA_UDMA4,
@@ -528,7 +532,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	/* Revision 0xC4 is UDMA100 */
 	static const struct ata_port_info info_c4 = {
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+			 ATA_FLAG_IGN_SIMPLEX,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.udma_mask = ATA_UDMA5,
@@ -536,7 +541,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
 	static const struct ata_port_info info_c5 = {
-		.flags = ATA_FLAG_SLAVE_POSS,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2085e0a3a05a..2a6412f5d117 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_efar"
-#define DRV_VERSION	"0.4.4"
+#define DRV_VERSION	"0.4.5"
 
 /**
  *	efar_pre_reset	-	Enable bits
@@ -98,18 +98,17 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
 			    { 2, 1 },
 			    { 2, 3 }, };
 
-	if (pio > 2)
-		control |= 1;	/* TIME1 enable */
+	if (pio > 1)
+		control |= 1;	/* TIME */
 	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
-		control |= 2;	/* IE enable */
-	/* Intel specifies that the PPE functionality is for disk only */
+		control |= 2;	/* IE */
+	/* Intel specifies that the prefetch/posting is for disk only */
 	if (adev->class == ATA_DEV_ATA)
-		control |= 4;	/* PPE enable */
+		control |= 4;	/* PPE */
 
 	pci_read_config_word(dev, idetm_port, &idetm_data);
 
-	/* Enable PPE, IE and TIME as appropriate */
-
+	/* Set PPE, IE, and TIME as appropriate */
 	if (adev->devno == 0) {
 		idetm_data &= 0xCCF0;
 		idetm_data |= control;
@@ -129,7 +128,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
 		pci_write_config_byte(dev, 0x44, slave_data);
 	}
 
-	idetm_data |= 0x4000;	/* Ensure SITRE is enabled */
+	idetm_data |= 0x4000;	/* Ensure SITRE is set */
 	pci_write_config_word(dev, idetm_port, idetm_data);
 }
 
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index f72c6c5b820f..6932e56d179c 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -48,6 +48,7 @@
  *
  */
 
+#include <linux/async.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -1028,6 +1029,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
 				&legacy_sht);
 	if (ret)
 		goto fail;
+	async_synchronize_full();
 	ld->platform_dev = pdev;
 
 	/* Nothing found means we drop the port as its probably not there */
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 9a698097134b..f0d52f72f5bb 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -26,7 +26,7 @@ static unsigned int netcell_read_id(struct ata_device *adev,
 	unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
 	/* Firmware forgets to mark words 85-87 valid */
 	if (err_mask == 0)
-		id[ATA_ID_CSF_DEFAULT] |= 0x0400;
+		id[ATA_ID_CSF_DEFAULT] |= 0x4000;
 	return err_mask;
 }
 
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 340ba4f9dc54..4a9f3492b921 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -224,7 +224,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
 			break;
 	}
 
-	gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE,
+	gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
 				ACPI_ACTIVE_LOW);
 	if (gsi > 0)
 		break;
@@ -939,7 +939,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 		irqp = &res->data.extended_irq;
 
 		for (i = 0; i < irqp->interrupt_count; i++) {
-			irq = acpi_register_gsi(irqp->interrupts[i],
+			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
 				      irqp->triggering, irqp->polarity);
 			if (irq < 0)
 				return AE_ERROR;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 65e12bca657c..f96d0bef855e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -694,9 +694,8 @@ static ssize_t read_zero(struct file * file, char __user * buf,
 		written += chunk - unwritten;
 		if (unwritten)
 			break;
-		/* Consider changing this to just 'signal_pending()' with lots of testing */
-		if (fatal_signal_pending(current))
-			return written ? written : -EINTR;
+		if (signal_pending(current))
+			return written ? written : -ERESTARTSYS;
 		buf += chunk;
 		count -= chunk;
 		cond_resched();
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index a420e8d437dd..13f8871e5b21 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -2711,7 +2711,7 @@ static int __init mxser_module_init(void)
 			continue;
 
 		brd = &mxser_boards[m];
-		retval = mxser_get_ISA_conf(!ioaddr[b], brd);
+		retval = mxser_get_ISA_conf(ioaddr[b], brd);
 		if (retval <= 0) {
 			brd->info = NULL;
 			continue;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47d2ad0ae079..6e2ec0b18948 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		ret = -ENOMEM;
 		goto nomem_out;
 	}
-	if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
 		free_cpumask_var(policy->cpus);
 		kfree(policy);
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 80a257554b30..0411d912d82a 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -371,8 +371,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	list->user_token = list->hash.key << PAGE_SHIFT;
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!(map->flags & _DRM_DRIVER))
-		list->master = dev->primary->master;
+	list->master = dev->primary->master;
 	*maplist = list;
 	return 0;
 }
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 248a54bd2386..b3bc96f930a6 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1998-2002		Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007		MontaVista Software, Inc.
+ * Copyright (C) 2006-2007, 2009	MontaVista Software, Inc.
  * Copyright (C) 2007			Bartlomiej Zolnierkiewicz
  *
  * Portions Copyright (C) 1999 Promise Technology, Inc.
@@ -227,28 +227,19 @@ somebody_else:
 	return (dma_stat & 4) == 4;	/* return 1 if INTR asserted */
 }
 
-static void pdc202xx_reset_host (ide_hwif_t *hwif)
+static void pdc202xx_reset(ide_drive_t *drive)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	unsigned long high_16 = hwif->extra_base - 16;
 	u8 udma_speed_flag = inb(high_16 | 0x001f);
 
+	printk(KERN_WARNING "PDC202xx: software reset...\n");
+
 	outb(udma_speed_flag | 0x10, high_16 | 0x001f);
 	mdelay(100);
 	outb(udma_speed_flag & ~0x10, high_16 | 0x001f);
 	mdelay(2000);	/* 2 seconds ?! */
 
-	printk(KERN_WARNING "PDC202XX: %s channel reset.\n",
-		hwif->channel ? "Secondary" : "Primary");
-}
-
-static void pdc202xx_reset (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	ide_hwif_t *mate = hwif->mate;
-
-	pdc202xx_reset_host(hwif);
-	pdc202xx_reset_host(mate);
-
 	ide_set_max_pio(drive);
 }
 
@@ -328,9 +319,8 @@ static const struct ide_dma_ops pdc20246_dma_ops = {
 	.dma_start		= ide_dma_start,
 	.dma_end		= ide_dma_end,
 	.dma_test_irq		= pdc202xx_dma_test_irq,
-	.dma_lost_irq		= pdc202xx_dma_lost_irq,
+	.dma_lost_irq		= ide_dma_lost_irq,
 	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
-	.dma_clear		= pdc202xx_reset,
 	.dma_sff_read_status	= ide_dma_sff_read_status,
 };
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 424f7b048c30..3fd8b1e65483 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -20,7 +20,8 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
-#include <trace/block.h>
+
+#include <trace/events/block.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -53,8 +54,6 @@ struct dm_target_io {
 	union map_info info;
 };
 
-DEFINE_TRACE(block_bio_complete);
-
 /*
  * For request-based dm.
  * One of these is allocated per request.
@@ -656,8 +655,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* the bio has been remapped so dispatch it */
 
 		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
-				  tio->io->bio->bi_bdev->bd_dev,
-				  clone->bi_sector, sector);
+				  tio->io->bio->bi_bdev->bd_dev, sector);
 
 		generic_make_request(clone);
 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5d400aef8d9b..bb37fb1b2d82 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
-		  int previous, int noblock)
+		  int previous, int noblock, int noquiesce)
 {
 	struct stripe_head *sh;
 
@@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 
 	do {
 		wait_event_lock_irq(conf->wait_for_stripe,
-				    conf->quiesce == 0,
+				    conf->quiesce == 0 || noquiesce,
 				    conf->device_lock, /* nothing */);
 		sh = __find_stripe(conf, sector, conf->generation - previous);
 		if (!sh) {
@@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			sector_t bn = compute_blocknr(sh, i, 1);
 			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
-			sh2 = get_active_stripe(conf, s, 0, 1);
+			sh2 = get_active_stripe(conf, s, 0, 1, 1);
 			if (sh2 == NULL)
 				/* so far only the early blocks of this stripe
				 * have been requested. When later blocks
@@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh)
 	/* Finish reconstruct operations initiated by the expansion process */
 	if (sh->reconstruct_state == reconstruct_state_result) {
 		struct stripe_head *sh2
-			= get_active_stripe(conf, sh->sector, 1, 1);
+			= get_active_stripe(conf, sh->sector, 1, 1, 1);
 		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
 			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
@@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 
 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
 		struct stripe_head *sh2
-			= get_active_stripe(conf, sh->sector, 1, 1);
+			= get_active_stripe(conf, sh->sector, 1, 1, 1);
 		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
 			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
@@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev)
 	int i;
 
 	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
 			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 			(unsigned long long)logical_sector);
 
 		sh = get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw&RWA_MASK));
+				       (bi->bi_rw&RWA_MASK), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
@@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
 		int j;
 		int skipped = 0;
-		sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
+		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
 		set_bit(STRIPE_EXPANDING, &sh->state);
 		atomic_inc(&conf->reshape_stripes);
 		/* If any of this stripe is beyond the end of the old
@@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
 	last_sector =
-		raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
+		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
				    *(new_data_disks) - 1),
				     1, &dd_idx, NULL);
 	if (last_sector >= mddev->dev_sectors)
 		last_sector = mddev->dev_sectors - 1;
 	while (first_sector <= last_sector) {
-		sh = get_active_stripe(conf, first_sector, 1, 0);
+		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
 		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 		set_bit(STRIPE_HANDLE, &sh->state);
 		release_stripe(sh);
@@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 
 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
 
-	sh = get_active_stripe(conf, sector_nr, 0, 1);
+	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
 	if (sh == NULL) {
-		sh = get_active_stripe(conf, sector_nr, 0, 0);
+		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
 		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
@@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
-	for (i=0; i<mddev->raid_disks; i++)
+	for (i = 0; i < conf->raid_disks; i++)
 		if (conf->disks[i].rdev == NULL)
 			still_degraded = 1;
 
@@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 			/* already done this stripe */
 			continue;
 
-		sh = get_active_stripe(conf, sector, 0, 1);
+		sh = get_active_stripe(conf, sector, 0, 1, 0);
 
 		if (!sh) {
 			/* failed to get a stripe - must wait */
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
index ff7b7deded4f..7fde36e6d227 100644
--- a/drivers/media/video/ivtv/ivtv-queue.c
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -230,7 +230,8 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
 		return -ENOMEM;
 	}
 	if (ivtv_might_use_dma(s)) {
-		s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma);
+		s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
+				sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
 		ivtv_stream_sync_for_cpu(s);
 	}
 
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index c643d0fe118f..b56d72ff06e9 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -64,6 +64,31 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
 	unsigned int tmout;
 	int tmout_index;
 
+	/*
+	 * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
+	 * register is sometimes not set before a while when some
+	 * "unusual" data block sizes are used (such as with the SWITCH
+	 * command), even despite the fact that the XFER_DONE interrupt
+	 * was raised. And if another data transfer starts before
+	 * this bit comes to good sense (which eventually happens by
+	 * itself) then the new transfer simply fails with a timeout.
+	 */
+	if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
+		unsigned long t = jiffies + HZ;
+		unsigned int hw_state, count = 0;
+		do {
+			if (time_after(jiffies, t)) {
+				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
+				break;
+			}
+			hw_state = mvsd_read(MVSD_HW_STATE);
+			count++;
+		} while (!(hw_state & (1 << 13)));
+		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
+				   "(hw=0x%04x, count=%d, jiffies=%ld)\n",
+				   hw_state, count, jiffies - (t - HZ));
+	}
+
 	/* If timeout=0 then maximum timeout index is used. */
 	tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
 	tmout += data->timeout_clks;
@@ -620,9 +645,18 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	if (ios->bus_width == MMC_BUS_WIDTH_4)
 		ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
 
+	/*
+	 * The HI_SPEED_EN bit is causing trouble with many (but not all)
+	 * high speed SD, SDHC and SDIO cards. Not enabling that bit
+	 * makes all cards work. So let's just ignore that bit for now
+	 * and revisit this issue if problems for not enabling this bit
+	 * are ever reported.
+	 */
+#if 0
 	if (ios->timing == MMC_TIMING_MMC_HS ||
 	    ios->timing == MMC_TIMING_SD_HS)
 		ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
+#endif
 
 	host->ctrl = ctrl_reg;
 	mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
@@ -882,3 +916,4 @@ module_param(nodma, int, 0);
 MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
 MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mvsdio");
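Note: the FIFO_EMPTY workaround added in mvsd_setup_data() above is a bounded busy-wait: poll a status bit, give up after a jiffies deadline, and only warn if the bit never shows up. A generic sketch of the same pattern, not from this driver (the register accessor and bit name are placeholders; time_after() and cpu_relax() are the real API):

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <asm/processor.h>	/* cpu_relax() */

	/* Hypothetical accessor standing in for a device status register read. */
	extern unsigned int example_read_status(void);
	#define EXAMPLE_READY_BIT	(1 << 13)

	static void example_wait_ready(void)
	{
		unsigned long deadline = jiffies + HZ;	/* roughly one second */

		while (!(example_read_status() & EXAMPLE_READY_BIT)) {
			if (time_after(jiffies, deadline)) {
				pr_warn("ready bit never asserted, continuing anyway\n");
				break;
			}
			cpu_relax();
		}
	}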
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b4a615c55f28..f4cbe473670e 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -140,6 +140,8 @@ struct mxcmci_host {
 	struct work_struct	datawork;
 };
 
+static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
+
 static inline int mxcmci_use_dma(struct mxcmci_host *host)
 {
 	return host->do_dma;
@@ -160,7 +162,7 @@ static void mxcmci_softreset(struct mxcmci_host *host)
 	writew(0xff, host->base + MMC_REG_RES_TO);
 }
 
-static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
+static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 {
 	unsigned int nob = data->blocks;
 	unsigned int blksz = data->blksz;
@@ -168,6 +170,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 #ifdef HAS_DMA
 	struct scatterlist *sg;
 	int i;
+	int ret;
 #endif
 	if (data->flags & MMC_DATA_STREAM)
 		nob = 0xffff;
@@ -183,7 +186,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 	for_each_sg(data->sg, sg, data->sg_len, i) {
 		if (sg->offset & 3 || sg->length & 3) {
 			host->do_dma = 0;
-			return;
+			return 0;
 		}
 	}
 
@@ -192,23 +195,30 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);
 
-		imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize,
-				 host->res->start + MMC_REG_BUFFER_ACCESS,
-				 DMA_MODE_READ);
+		ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
+				datasize,
+				host->res->start + MMC_REG_BUFFER_ACCESS,
+				DMA_MODE_READ);
 	} else {
 		host->dma_dir = DMA_TO_DEVICE;
 		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);
 
-		imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize,
-				 host->res->start + MMC_REG_BUFFER_ACCESS,
-				 DMA_MODE_WRITE);
+		ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
+				datasize,
+				host->res->start + MMC_REG_BUFFER_ACCESS,
+				DMA_MODE_WRITE);
 	}
 
+	if (ret) {
+		dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
+		return ret;
+	}
 	wmb();
 
 	imx_dma_enable(host->dma);
 #endif /* HAS_DMA */
+	return 0;
 }
 
 static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
@@ -345,8 +355,11 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 		stat = readl(host->base + MMC_REG_STATUS);
 		if (stat & STATUS_ERR_MASK)
 			return stat;
-		if (time_after(jiffies, timeout))
+		if (time_after(jiffies, timeout)) {
+			mxcmci_softreset(host);
+			mxcmci_set_clk_rate(host, host->clock);
 			return STATUS_TIME_OUT_READ;
+		}
 		if (stat & mask)
 			return 0;
 		cpu_relax();
@@ -531,6 +544,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 {
 	struct mxcmci_host *host = mmc_priv(mmc);
 	unsigned int cmdat = host->cmdat;
+	int error;
 
 	WARN_ON(host->req != NULL);
 
@@ -540,7 +554,12 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 	host->do_dma = 1;
 #endif
 	if (req->data) {
-		mxcmci_setup_data(host, req->data);
+		error = mxcmci_setup_data(host, req->data);
+		if (error) {
+			req->cmd->error = error;
+			goto out;
+		}
+
 
 		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 
@@ -548,7 +567,9 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 			cmdat |= CMD_DAT_CONT_WRITE;
 	}
 
-	if (mxcmci_start_cmd(host, req->cmd, cmdat))
+	error = mxcmci_start_cmd(host, req->cmd, cmdat);
+out:
+	if (error)
 		mxcmci_finish_request(host, req);
 }
 
@@ -724,7 +745,9 @@ static int mxcmci_probe(struct platform_device *pdev)
 		goto out_clk_put;
 	}
 
-	mmc->f_min = clk_get_rate(host->clk) >> 7;
+	mmc->f_min = clk_get_rate(host->clk) >> 16;
+	if (mmc->f_min < 400000)
+		mmc->f_min = 400000;
 	mmc->f_max = clk_get_rate(host->clk) >> 1;
 
 	/* recommended in data sheet */
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bfa25c01c872..dceb5ee3bda0 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -822,7 +822,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
 		del_timer(&host->cmd_abort_timer);
 		host->abort = 1;
 		OMAP_MMC_WRITE(host, IE, 0);
-		disable_irq(host->irq);
+		disable_irq_nosync(host->irq);
 		schedule_work(&host->cmd_abort_work);
 		return IRQ_HANDLED;
 	}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e62a22a7f00c..c40cb96255a2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -680,7 +680,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
 	host->dma_ch = -1;
 	/*
	 * DMA Callback: run in interrupt context.
-	 * mutex_unlock will through a kernel warning if used.
+	 * mutex_unlock will throw a kernel warning if used.
	 */
 	up(&host->sem);
 }
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 3ff4ac3abe8b..128c614d11aa 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -55,7 +55,13 @@ static u32 esdhc_readl(struct sdhci_host *host, int reg)
 
 static u16 esdhc_readw(struct sdhci_host *host, int reg)
 {
-	return in_be16(host->ioaddr + (reg ^ 0x2));
+	u16 ret;
+
+	if (unlikely(reg == SDHCI_HOST_VERSION))
+		ret = in_be16(host->ioaddr + reg);
+	else
+		ret = in_be16(host->ioaddr + (reg ^ 0x2));
+	return ret;
 }
 
 static u8 esdhc_readb(struct sdhci_host *host, int reg)
@@ -277,6 +283,7 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
 static const struct of_device_id sdhci_of_match[] = {
 	{ .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
 	{ .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
+	{ .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
 	{ .compatible = "generic-sdhci", },
 	{},
 };
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 0119220de7d0..02700f769b8a 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -407,16 +407,17 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 	}
 	info->chip.ecc.mode = ecc_mode;
 
-	info->clk = clk_get(&pdev->dev, "AEMIFCLK");
+	info->clk = clk_get(&pdev->dev, "aemif");
 	if (IS_ERR(info->clk)) {
 		ret = PTR_ERR(info->clk);
-		dev_dbg(&pdev->dev, "unable to get AEMIFCLK, err %d\n", ret);
+		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
 		goto err_clk;
 	}
 
 	ret = clk_enable(info->clk);
 	if (ret < 0) {
-		dev_dbg(&pdev->dev, "unable to enable AEMIFCLK, err %d\n", ret);
+		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+			ret);
 		goto err_clk_enable;
 	}
 
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 8247a945a1d9..3b19e0ce290f 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32;
 #define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
 #define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
 #define EarlyTxThld	0x3F	/* 0x3F means NO early transmit */
-#define RxPacketMaxSize	0x3FE8	/* 16K - 1 - ETH_HLEN - VLAN - CRC... */
 #define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
 #define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
 
@@ -2357,10 +2356,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
 	return cmd;
 }
 
-static void rtl_set_rx_max_size(void __iomem *ioaddr)
+static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 {
 	/* Low hurts. Let's disable the filtering. */
-	RTL_W16(RxMaxSize, 16383);
+	RTL_W16(RxMaxSize, rx_buf_sz);
 }
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
@@ -2407,7 +2406,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
 
 	RTL_W8(EarlyTxThres, EarlyTxThld);
 
-	rtl_set_rx_max_size(ioaddr);
+	rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
 
 	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -2668,7 +2667,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
 
 	RTL_W8(EarlyTxThres, EarlyTxThld);
 
-	rtl_set_rx_max_size(ioaddr);
+	rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
 
 	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
 
@@ -2846,7 +2845,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
 
 	RTL_W8(EarlyTxThres, EarlyTxThld);
 
-	rtl_set_rx_max_size(ioaddr);
+	rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
 
 	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
 
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 73348c4047e9..4a9cc92d4d18 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -702,7 +702,7 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void iosapic_set_affinity_irq(unsigned int irq,
+static int iosapic_set_affinity_irq(unsigned int irq,
				     const struct cpumask *dest)
 {
 	struct vector_info *vi = iosapic_get_vector(irq);
@@ -712,7 +712,7 @@ static void iosapic_set_affinity_irq(unsigned int irq,
 
 	dest_cpu = cpu_check_affinity(irq, dest);
 	if (dest_cpu < 0)
-		return;
+		return -1;
 
 	cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
 	vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
@@ -724,6 +724,8 @@ static void iosapic_set_affinity_irq(unsigned int irq,
 	iosapic_set_irt_data(vi, &dummy_d0, &d1);
 	iosapic_wr_irt_entry(vi, d0, d1);
 	spin_unlock_irqrestore(&iosapic_lock, flags);
+
+	return 0;
 }
 #endif
 
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index dd18f857dfb0..42e4260c3b12 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -153,45 +153,47 @@ int ibmphp_init_devno(struct slot **cur_slot)
 		return -1;
 	}
 	for (loop = 0; loop < len; loop++) {
-		if ((*cur_slot)->number == rtable->slots[loop].slot) {
-		if ((*cur_slot)->bus == rtable->slots[loop].bus) {
+		if ((*cur_slot)->number == rtable->slots[loop].slot &&
+		    (*cur_slot)->bus == rtable->slots[loop].bus) {
+			struct io_apic_irq_attr irq_attr;
+
 			(*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
 			for (i = 0; i < 4; i++)
 				(*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
-						(int) (*cur_slot)->device, i);
-
-			debug("(*cur_slot)->irq[0] = %x\n",
-					(*cur_slot)->irq[0]);
-			debug("(*cur_slot)->irq[1] = %x\n",
-					(*cur_slot)->irq[1]);
-			debug("(*cur_slot)->irq[2] = %x\n",
-					(*cur_slot)->irq[2]);
-			debug("(*cur_slot)->irq[3] = %x\n",
-					(*cur_slot)->irq[3]);
-
-			debug("rtable->exlusive_irqs = %x\n",
+						(int) (*cur_slot)->device, i,
+						&irq_attr);
+
+			debug("(*cur_slot)->irq[0] = %x\n",
+					(*cur_slot)->irq[0]);
+			debug("(*cur_slot)->irq[1] = %x\n",
+					(*cur_slot)->irq[1]);
+			debug("(*cur_slot)->irq[2] = %x\n",
+					(*cur_slot)->irq[2]);
+			debug("(*cur_slot)->irq[3] = %x\n",
+					(*cur_slot)->irq[3]);
+
+			debug("rtable->exlusive_irqs = %x\n",
 					rtable->exclusive_irqs);
 			debug("rtable->slots[loop].irq[0].bitmap = %x\n",
 					rtable->slots[loop].irq[0].bitmap);
 			debug("rtable->slots[loop].irq[1].bitmap = %x\n",
 					rtable->slots[loop].irq[1].bitmap);
 			debug("rtable->slots[loop].irq[2].bitmap = %x\n",
 					rtable->slots[loop].irq[2].bitmap);
 			debug("rtable->slots[loop].irq[3].bitmap = %x\n",
 					rtable->slots[loop].irq[3].bitmap);
 
 			debug("rtable->slots[loop].irq[0].link = %x\n",
 					rtable->slots[loop].irq[0].link);
 			debug("rtable->slots[loop].irq[1].link = %x\n",
 					rtable->slots[loop].irq[1].link);
 			debug("rtable->slots[loop].irq[2].link = %x\n",
 					rtable->slots[loop].irq[2].link);
 			debug("rtable->slots[loop].irq[3].link = %x\n",
 					rtable->slots[loop].irq[3].link);
 			debug("end of init_devno\n");
 			kfree(rtable);
 			return 0;
-			}
 		}
 	}
 
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 6808d8333ecc..737a1c44b07a 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
@@ -98,6 +98,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) | |||
98 | int max_irq; | 98 | int max_irq; |
99 | int pos; | 99 | int pos; |
100 | int irq; | 100 | int irq; |
101 | int node; | ||
101 | 102 | ||
102 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); | 103 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); |
103 | if (!pos) | 104 | if (!pos) |
@@ -125,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) | |||
125 | cfg->msg.address_lo = 0xffffffff; | 126 | cfg->msg.address_lo = 0xffffffff; |
126 | cfg->msg.address_hi = 0xffffffff; | 127 | cfg->msg.address_hi = 0xffffffff; |
127 | 128 | ||
128 | irq = create_irq(); | 129 | node = dev_to_node(&dev->dev); |
130 | irq = create_irq_nr(0, node); | ||
129 | 131 | ||
130 | if (irq <= 0) { | 132 | if (irq <= 0) { |
131 | kfree(cfg); | 133 | kfree(cfg); |
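The htirq change above makes the irq descriptor NUMA-aware: instead of create_irq(), __ht_create_irq() now asks create_irq_nr(0, node) for the first free irq with its irq_desc allocated on the device's node. A minimal sketch of that pattern, assuming only the helpers visible in the hunk; the wrapper name is illustrative:

    #include <linux/pci.h>
    #include <linux/irq.h>

    /* sketch: allocate an irq whose descriptor lives on the device's NUMA node */
    static int alloc_irq_near_device(struct pci_dev *dev)
    {
            int node = dev_to_node(&dev->dev);      /* -1 when the node is unknown */

            /* first unused irq number >= 0, with its irq_desc placed on 'node' */
            return create_irq_nr(0, node);
    }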
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index a563fbe559d0..cd389162735f 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -1972,15 +1972,6 @@ static int __init init_dmars(void) | |||
1972 | } | 1972 | } |
1973 | } | 1973 | } |
1974 | 1974 | ||
1975 | #ifdef CONFIG_INTR_REMAP | ||
1976 | if (!intr_remapping_enabled) { | ||
1977 | ret = enable_intr_remapping(0); | ||
1978 | if (ret) | ||
1979 | printk(KERN_ERR | ||
1980 | "IOMMU: enable interrupt remapping failed\n"); | ||
1981 | } | ||
1982 | #endif | ||
1983 | |||
1984 | /* | 1975 | /* |
1985 | * For each rmrr | 1976 | * For each rmrr |
1986 | * for each dev attached to rmrr | 1977 | * for each dev attached to rmrr |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index f5e0ea724a6f..3a0cb0bb0593 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | |||
15 | static int ir_ioapic_num; | 15 | static int ir_ioapic_num; |
16 | int intr_remapping_enabled; | 16 | int intr_remapping_enabled; |
17 | 17 | ||
18 | static int disable_intremap; | ||
19 | static __init int setup_nointremap(char *str) | ||
20 | { | ||
21 | disable_intremap = 1; | ||
22 | return 0; | ||
23 | } | ||
24 | early_param("nointremap", setup_nointremap); | ||
25 | |||
18 | struct irq_2_iommu { | 26 | struct irq_2_iommu { |
19 | struct intel_iommu *iommu; | 27 | struct intel_iommu *iommu; |
20 | u16 irte_index; | 28 | u16 irte_index; |
@@ -23,15 +31,12 @@ struct irq_2_iommu { | |||
23 | }; | 31 | }; |
24 | 32 | ||
25 | #ifdef CONFIG_GENERIC_HARDIRQS | 33 | #ifdef CONFIG_GENERIC_HARDIRQS |
26 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) | 34 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) |
27 | { | 35 | { |
28 | struct irq_2_iommu *iommu; | 36 | struct irq_2_iommu *iommu; |
29 | int node; | ||
30 | |||
31 | node = cpu_to_node(cpu); | ||
32 | 37 | ||
33 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | 38 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); |
34 | printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); | 39 | printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); |
35 | 40 | ||
36 | return iommu; | 41 | return iommu; |
37 | } | 42 | } |
@@ -48,7 +53,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | |||
48 | return desc->irq_2_iommu; | 53 | return desc->irq_2_iommu; |
49 | } | 54 | } |
50 | 55 | ||
51 | static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) | 56 | static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) |
52 | { | 57 | { |
53 | struct irq_desc *desc; | 58 | struct irq_desc *desc; |
54 | struct irq_2_iommu *irq_iommu; | 59 | struct irq_2_iommu *irq_iommu; |
@@ -56,7 +61,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) | |||
56 | /* | 61 | /* |
57 | * alloc irq desc if not allocated already. | 62 | * alloc irq desc if not allocated already. |
58 | */ | 63 | */ |
59 | desc = irq_to_desc_alloc_cpu(irq, cpu); | 64 | desc = irq_to_desc_alloc_node(irq, node); |
60 | if (!desc) { | 65 | if (!desc) { |
61 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | 66 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); |
62 | return NULL; | 67 | return NULL; |
@@ -65,14 +70,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) | |||
65 | irq_iommu = desc->irq_2_iommu; | 70 | irq_iommu = desc->irq_2_iommu; |
66 | 71 | ||
67 | if (!irq_iommu) | 72 | if (!irq_iommu) |
68 | desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); | 73 | desc->irq_2_iommu = get_one_free_irq_2_iommu(node); |
69 | 74 | ||
70 | return desc->irq_2_iommu; | 75 | return desc->irq_2_iommu; |
71 | } | 76 | } |
72 | 77 | ||
73 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | 78 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) |
74 | { | 79 | { |
75 | return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); | 80 | return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id)); |
76 | } | 81 | } |
77 | 82 | ||
78 | #else /* !CONFIG_SPARSE_IRQ */ | 83 | #else /* !CONFIG_SPARSE_IRQ */ |
@@ -423,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | |||
423 | readl, (sts & DMA_GSTS_IRTPS), sts); | 428 | readl, (sts & DMA_GSTS_IRTPS), sts); |
424 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 429 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
425 | 430 | ||
426 | if (mode == 0) { | ||
427 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
428 | |||
429 | /* enable comaptiblity format interrupt pass through */ | ||
430 | cmd = iommu->gcmd | DMA_GCMD_CFI; | ||
431 | iommu->gcmd |= DMA_GCMD_CFI; | ||
432 | writel(cmd, iommu->reg + DMAR_GCMD_REG); | ||
433 | |||
434 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | ||
435 | readl, (sts & DMA_GSTS_CFIS), sts); | ||
436 | |||
437 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
438 | } | ||
439 | |||
440 | /* | 431 | /* |
441 | * global invalidation of interrupt entry cache before enabling | 432 | * global invalidation of interrupt entry cache before enabling |
442 | * interrupt-remapping. | 433 | * interrupt-remapping. |
@@ -516,6 +507,23 @@ end: | |||
516 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 507 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
517 | } | 508 | } |
518 | 509 | ||
510 | int __init intr_remapping_supported(void) | ||
511 | { | ||
512 | struct dmar_drhd_unit *drhd; | ||
513 | |||
514 | if (disable_intremap) | ||
515 | return 0; | ||
516 | |||
517 | for_each_drhd_unit(drhd) { | ||
518 | struct intel_iommu *iommu = drhd->iommu; | ||
519 | |||
520 | if (!ecap_ir_support(iommu->ecap)) | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | return 1; | ||
525 | } | ||
526 | |||
519 | int __init enable_intr_remapping(int eim) | 527 | int __init enable_intr_remapping(int eim) |
520 | { | 528 | { |
521 | struct dmar_drhd_unit *drhd; | 529 | struct dmar_drhd_unit *drhd; |
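Two additions in the intr_remapping.c hunk work together: a "nointremap" early parameter that forces interrupt remapping off, and intr_remapping_supported(), which reports whether every DRHD unit advertises IR capability. A minimal sketch of how a setup path might use the new probe before calling the existing enable_intr_remapping(); only the two functions shown above are assumed:

    /* sketch: gate enabling on the new capability/parameter probe */
    static int __init maybe_enable_intr_remapping(void)
    {
            if (!intr_remapping_supported())
                    return -ENODEV; /* "nointremap" given, or an IOMMU lacks IR support */

            /* 0 selects the non-x2apic (xapic) mode, as in the removed init_dmars() call */
            return enable_intr_remapping(0);
    }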
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e3c3e081b834..f1ae2475ffff 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -745,6 +745,8 @@ int pci_setup_device(struct pci_dev *dev) | |||
745 | 745 | ||
746 | /* Early fixups, before probing the BARs */ | 746 | /* Early fixups, before probing the BARs */ |
747 | pci_fixup_device(pci_fixup_early, dev); | 747 | pci_fixup_device(pci_fixup_early, dev); |
748 | /* device class may be changed after fixup */ | ||
749 | class = dev->class >> 8; | ||
748 | 750 | ||
749 | switch (dev->hdr_type) { /* header type */ | 751 | switch (dev->hdr_type) { /* header type */ |
750 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ | 752 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
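The probe.c hunk re-reads the class code because an early fixup may rewrite dev->class before the header-type switch below it runs. A hypothetical quirk of that shape, with placeholder vendor/device IDs, only to show why the reload is needed:

    #include <linux/pci.h>
    #include <linux/pci_ids.h>

    /* sketch: an early quirk that rewrites the class code; pci_setup_device()
     * must therefore reload 'class' after pci_fixup_device(pci_fixup_early, dev) */
    static void quirk_force_bridge_class(struct pci_dev *dev)
    {
            dev->class = (PCI_CLASS_BRIDGE_OTHER << 8) | (dev->class & 0xff);
    }
    DECLARE_PCI_FIXUP_EARLY(0x1234, 0x5678, quirk_force_bridge_class);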
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index adf17856bacc..7f207f335bec 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -123,7 +123,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, | |||
123 | } | 123 | } |
124 | 124 | ||
125 | flags = irq_flags(triggering, polarity, shareable); | 125 | flags = irq_flags(triggering, polarity, shareable); |
126 | irq = acpi_register_gsi(gsi, triggering, polarity); | 126 | irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); |
127 | if (irq >= 0) | 127 | if (irq >= 0) |
128 | pcibios_penalize_isa_irq(irq, 1); | 128 | pcibios_penalize_isa_irq(irq, 1); |
129 | else | 129 | else |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index e1716f14cd47..91e316fe6522 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1065,6 +1065,7 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
1065 | return blk_trace_setup(sdp->device->request_queue, | 1065 | return blk_trace_setup(sdp->device->request_queue, |
1066 | sdp->disk->disk_name, | 1066 | sdp->disk->disk_name, |
1067 | MKDEV(SCSI_GENERIC_MAJOR, sdp->index), | 1067 | MKDEV(SCSI_GENERIC_MAJOR, sdp->index), |
1068 | NULL, | ||
1068 | (char *)arg); | 1069 | (char *)arg); |
1069 | case BLKTRACESTART: | 1070 | case BLKTRACESTART: |
1070 | return blk_trace_startstop(sdp->device->request_queue, 1); | 1071 | return blk_trace_startstop(sdp->device->request_queue, 1); |
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 9f460b175c50..5f0be40dfdab 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c | |||
@@ -1031,6 +1031,8 @@ imx_console_setup(struct console *co, char *options) | |||
1031 | if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) | 1031 | if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) |
1032 | co->index = 0; | 1032 | co->index = 0; |
1033 | sport = imx_ports[co->index]; | 1033 | sport = imx_ports[co->index]; |
1034 | if(sport == NULL) | ||
1035 | return -ENODEV; | ||
1034 | 1036 | ||
1035 | if (options) | 1037 | if (options) |
1036 | uart_parse_options(options, &baud, &parity, &bits, &flow); | 1038 | uart_parse_options(options, &baud, &parity, &bits, &flow); |
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c index 7dc3a6b41397..a0e0d246b592 100644 --- a/drivers/ssb/embedded.c +++ b/drivers/ssb/embedded.c | |||
@@ -29,6 +29,7 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks) | |||
29 | } | 29 | } |
30 | return -ENODEV; | 30 | return -ENODEV; |
31 | } | 31 | } |
32 | EXPORT_SYMBOL(ssb_watchdog_timer_set); | ||
32 | 33 | ||
33 | u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) | 34 | u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) |
34 | { | 35 | { |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 8ac9cddac575..cab100acf983 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
@@ -18,6 +18,16 @@ config XEN_SCRUB_PAGES | |||
18 | secure, but slightly less efficient. | 18 | secure, but slightly less efficient. |
19 | If in doubt, say yes. | 19 | If in doubt, say yes. |
20 | 20 | ||
21 | config XEN_DEV_EVTCHN | ||
22 | tristate "Xen /dev/xen/evtchn device" | ||
23 | depends on XEN | ||
24 | default y | ||
25 | help | ||
26 | The evtchn driver allows a userspace process to trigger event | ||
27 | channels and to receive notification of an event channel | ||
28 | firing. | ||
29 | If in doubt, say yes. | ||
30 | |||
21 | config XENFS | 31 | config XENFS |
22 | tristate "Xen filesystem" | 32 | tristate "Xen filesystem" |
23 | depends on XEN | 33 | depends on XEN |
@@ -41,3 +51,13 @@ config XEN_COMPAT_XENFS | |||
41 | a xen platform. | 51 | a xen platform. |
42 | If in doubt, say yes. | 52 | If in doubt, say yes. |
43 | 53 | ||
54 | config XEN_SYS_HYPERVISOR | ||
55 | bool "Create xen entries under /sys/hypervisor" | ||
56 | depends on XEN && SYSFS | ||
57 | select SYS_HYPERVISOR | ||
58 | default y | ||
59 | help | ||
60 | Create entries under /sys/hypervisor describing the Xen | ||
61 | hypervisor environment. When running native or in another | ||
62 | virtual environment, /sys/hypervisor will still be present, | ||
63 | but will have no xen contents. \ No newline at end of file | ||
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index ff8accc9e103..ec2a39b1e26f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -4,4 +4,6 @@ obj-y += xenbus/ | |||
4 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 4 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
5 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 5 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
6 | obj-$(CONFIG_XEN_BALLOON) += balloon.o | 6 | obj-$(CONFIG_XEN_BALLOON) += balloon.o |
7 | obj-$(CONFIG_XENFS) += xenfs/ \ No newline at end of file | 7 | obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o |
8 | obj-$(CONFIG_XENFS) += xenfs/ | ||
9 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o \ No newline at end of file | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 30963af5dba0..891d2e90753a 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -151,6 +151,12 @@ static unsigned int evtchn_from_irq(unsigned irq) | |||
151 | return info_for_irq(irq)->evtchn; | 151 | return info_for_irq(irq)->evtchn; |
152 | } | 152 | } |
153 | 153 | ||
154 | unsigned irq_from_evtchn(unsigned int evtchn) | ||
155 | { | ||
156 | return evtchn_to_irq[evtchn]; | ||
157 | } | ||
158 | EXPORT_SYMBOL_GPL(irq_from_evtchn); | ||
159 | |||
154 | static enum ipi_vector ipi_from_irq(unsigned irq) | 160 | static enum ipi_vector ipi_from_irq(unsigned irq) |
155 | { | 161 | { |
156 | struct irq_info *info = info_for_irq(irq); | 162 | struct irq_info *info = info_for_irq(irq); |
@@ -335,7 +341,7 @@ static int find_unbound_irq(void) | |||
335 | if (irq == nr_irqs) | 341 | if (irq == nr_irqs) |
336 | panic("No available IRQ to bind to: increase nr_irqs!\n"); | 342 | panic("No available IRQ to bind to: increase nr_irqs!\n"); |
337 | 343 | ||
338 | desc = irq_to_desc_alloc_cpu(irq, 0); | 344 | desc = irq_to_desc_alloc_node(irq, 0); |
339 | if (WARN_ON(desc == NULL)) | 345 | if (WARN_ON(desc == NULL)) |
340 | return -1; | 346 | return -1; |
341 | 347 | ||
@@ -688,13 +694,13 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
688 | } | 694 | } |
689 | 695 | ||
690 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ | 696 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ |
691 | static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | 697 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) |
692 | { | 698 | { |
693 | struct evtchn_bind_vcpu bind_vcpu; | 699 | struct evtchn_bind_vcpu bind_vcpu; |
694 | int evtchn = evtchn_from_irq(irq); | 700 | int evtchn = evtchn_from_irq(irq); |
695 | 701 | ||
696 | if (!VALID_EVTCHN(evtchn)) | 702 | if (!VALID_EVTCHN(evtchn)) |
697 | return; | 703 | return -1; |
698 | 704 | ||
699 | /* Send future instances of this interrupt to other vcpu. */ | 705 | /* Send future instances of this interrupt to other vcpu. */ |
700 | bind_vcpu.port = evtchn; | 706 | bind_vcpu.port = evtchn; |
@@ -707,13 +713,15 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
707 | */ | 713 | */ |
708 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | 714 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) |
709 | bind_evtchn_to_cpu(evtchn, tcpu); | 715 | bind_evtchn_to_cpu(evtchn, tcpu); |
710 | } | ||
711 | 716 | ||
717 | return 0; | ||
718 | } | ||
712 | 719 | ||
713 | static void set_affinity_irq(unsigned irq, const struct cpumask *dest) | 720 | static int set_affinity_irq(unsigned irq, const struct cpumask *dest) |
714 | { | 721 | { |
715 | unsigned tcpu = cpumask_first(dest); | 722 | unsigned tcpu = cpumask_first(dest); |
716 | rebind_irq_to_cpu(irq, tcpu); | 723 | |
724 | return rebind_irq_to_cpu(irq, tcpu); | ||
717 | } | 725 | } |
718 | 726 | ||
719 | int resend_irq_on_evtchn(unsigned int irq) | 727 | int resend_irq_on_evtchn(unsigned int irq) |
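The events.c changes feed the rest of this merge: irq_from_evtchn() is exported for the new /dev/xen/evtchn driver, irq_to_desc_alloc_node() replaces the per-cpu allocator, and rebind_irq_to_cpu()/set_affinity_irq() now return a status so affinity failures can propagate. A small sketch of the exported lookup, in the spirit of the driver's write path further below; the helper name is illustrative:

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <xen/events.h>

    /* sketch: re-enable delivery for a bound event channel via its irq */
    static int evtchn_unmask_port(unsigned int port)
    {
            int irq = irq_from_evtchn(port);        /* -1 if the port is not bound */

            if (irq < 0)
                    return -ENOENT;
            enable_irq(irq);
            return 0;
    }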
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c new file mode 100644 index 000000000000..af031950f9b1 --- /dev/null +++ b/drivers/xen/evtchn.c | |||
@@ -0,0 +1,507 @@ | |||
1 | /****************************************************************************** | ||
2 | * evtchn.c | ||
3 | * | ||
4 | * Driver for receiving and demuxing event-channel signals. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005, K A Fraser | ||
7 | * Multi-process extensions Copyright (c) 2004, Steven Smith | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation; or, when distributed | ||
12 | * separately from the Linux kernel or incorporated into other | ||
13 | * software packages, subject to the following license: | ||
14 | * | ||
15 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
16 | * of this source file (the "Software"), to deal in the Software without | ||
17 | * restriction, including without limitation the rights to use, copy, modify, | ||
18 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
19 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
20 | * the following conditions: | ||
21 | * | ||
22 | * The above copyright notice and this permission notice shall be included in | ||
23 | * all copies or substantial portions of the Software. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
28 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
29 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
30 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
31 | * IN THE SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/sched.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/string.h> | ||
39 | #include <linux/errno.h> | ||
40 | #include <linux/fs.h> | ||
41 | #include <linux/errno.h> | ||
42 | #include <linux/miscdevice.h> | ||
43 | #include <linux/major.h> | ||
44 | #include <linux/proc_fs.h> | ||
45 | #include <linux/stat.h> | ||
46 | #include <linux/poll.h> | ||
47 | #include <linux/irq.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/gfp.h> | ||
50 | #include <linux/mutex.h> | ||
51 | #include <linux/cpu.h> | ||
52 | #include <xen/events.h> | ||
53 | #include <xen/evtchn.h> | ||
54 | #include <asm/xen/hypervisor.h> | ||
55 | |||
56 | struct per_user_data { | ||
57 | struct mutex bind_mutex; /* serialize bind/unbind operations */ | ||
58 | |||
59 | /* Notification ring, accessed via /dev/xen/evtchn. */ | ||
60 | #define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) | ||
61 | #define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) | ||
62 | evtchn_port_t *ring; | ||
63 | unsigned int ring_cons, ring_prod, ring_overflow; | ||
64 | struct mutex ring_cons_mutex; /* protect against concurrent readers */ | ||
65 | |||
66 | /* Processes wait on this queue when ring is empty. */ | ||
67 | wait_queue_head_t evtchn_wait; | ||
68 | struct fasync_struct *evtchn_async_queue; | ||
69 | const char *name; | ||
70 | }; | ||
71 | |||
72 | /* Who's bound to each port? */ | ||
73 | static struct per_user_data *port_user[NR_EVENT_CHANNELS]; | ||
74 | static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ | ||
75 | |||
76 | irqreturn_t evtchn_interrupt(int irq, void *data) | ||
77 | { | ||
78 | unsigned int port = (unsigned long)data; | ||
79 | struct per_user_data *u; | ||
80 | |||
81 | spin_lock(&port_user_lock); | ||
82 | |||
83 | u = port_user[port]; | ||
84 | |||
85 | disable_irq_nosync(irq); | ||
86 | |||
87 | if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { | ||
88 | u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; | ||
89 | wmb(); /* Ensure ring contents visible */ | ||
90 | if (u->ring_cons == u->ring_prod++) { | ||
91 | wake_up_interruptible(&u->evtchn_wait); | ||
92 | kill_fasync(&u->evtchn_async_queue, | ||
93 | SIGIO, POLL_IN); | ||
94 | } | ||
95 | } else { | ||
96 | u->ring_overflow = 1; | ||
97 | } | ||
98 | |||
99 | spin_unlock(&port_user_lock); | ||
100 | |||
101 | return IRQ_HANDLED; | ||
102 | } | ||
103 | |||
104 | static ssize_t evtchn_read(struct file *file, char __user *buf, | ||
105 | size_t count, loff_t *ppos) | ||
106 | { | ||
107 | int rc; | ||
108 | unsigned int c, p, bytes1 = 0, bytes2 = 0; | ||
109 | struct per_user_data *u = file->private_data; | ||
110 | |||
111 | /* Whole number of ports. */ | ||
112 | count &= ~(sizeof(evtchn_port_t)-1); | ||
113 | |||
114 | if (count == 0) | ||
115 | return 0; | ||
116 | |||
117 | if (count > PAGE_SIZE) | ||
118 | count = PAGE_SIZE; | ||
119 | |||
120 | for (;;) { | ||
121 | mutex_lock(&u->ring_cons_mutex); | ||
122 | |||
123 | rc = -EFBIG; | ||
124 | if (u->ring_overflow) | ||
125 | goto unlock_out; | ||
126 | |||
127 | c = u->ring_cons; | ||
128 | p = u->ring_prod; | ||
129 | if (c != p) | ||
130 | break; | ||
131 | |||
132 | mutex_unlock(&u->ring_cons_mutex); | ||
133 | |||
134 | if (file->f_flags & O_NONBLOCK) | ||
135 | return -EAGAIN; | ||
136 | |||
137 | rc = wait_event_interruptible(u->evtchn_wait, | ||
138 | u->ring_cons != u->ring_prod); | ||
139 | if (rc) | ||
140 | return rc; | ||
141 | } | ||
142 | |||
143 | /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ | ||
144 | if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { | ||
145 | bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * | ||
146 | sizeof(evtchn_port_t); | ||
147 | bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); | ||
148 | } else { | ||
149 | bytes1 = (p - c) * sizeof(evtchn_port_t); | ||
150 | bytes2 = 0; | ||
151 | } | ||
152 | |||
153 | /* Truncate chunks according to caller's maximum byte count. */ | ||
154 | if (bytes1 > count) { | ||
155 | bytes1 = count; | ||
156 | bytes2 = 0; | ||
157 | } else if ((bytes1 + bytes2) > count) { | ||
158 | bytes2 = count - bytes1; | ||
159 | } | ||
160 | |||
161 | rc = -EFAULT; | ||
162 | rmb(); /* Ensure that we see the port before we copy it. */ | ||
163 | if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || | ||
164 | ((bytes2 != 0) && | ||
165 | copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) | ||
166 | goto unlock_out; | ||
167 | |||
168 | u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t); | ||
169 | rc = bytes1 + bytes2; | ||
170 | |||
171 | unlock_out: | ||
172 | mutex_unlock(&u->ring_cons_mutex); | ||
173 | return rc; | ||
174 | } | ||
175 | |||
176 | static ssize_t evtchn_write(struct file *file, const char __user *buf, | ||
177 | size_t count, loff_t *ppos) | ||
178 | { | ||
179 | int rc, i; | ||
180 | evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL); | ||
181 | struct per_user_data *u = file->private_data; | ||
182 | |||
183 | if (kbuf == NULL) | ||
184 | return -ENOMEM; | ||
185 | |||
186 | /* Whole number of ports. */ | ||
187 | count &= ~(sizeof(evtchn_port_t)-1); | ||
188 | |||
189 | rc = 0; | ||
190 | if (count == 0) | ||
191 | goto out; | ||
192 | |||
193 | if (count > PAGE_SIZE) | ||
194 | count = PAGE_SIZE; | ||
195 | |||
196 | rc = -EFAULT; | ||
197 | if (copy_from_user(kbuf, buf, count) != 0) | ||
198 | goto out; | ||
199 | |||
200 | spin_lock_irq(&port_user_lock); | ||
201 | for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) | ||
202 | if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) | ||
203 | enable_irq(irq_from_evtchn(kbuf[i])); | ||
204 | spin_unlock_irq(&port_user_lock); | ||
205 | |||
206 | rc = count; | ||
207 | |||
208 | out: | ||
209 | free_page((unsigned long)kbuf); | ||
210 | return rc; | ||
211 | } | ||
212 | |||
213 | static int evtchn_bind_to_user(struct per_user_data *u, int port) | ||
214 | { | ||
215 | int rc = 0; | ||
216 | |||
217 | /* | ||
218 | * Ports are never reused, so every caller should pass in a | ||
219 | * unique port. | ||
220 | * | ||
221 | * (Locking not necessary because we haven't registered the | ||
222 | * interrupt handler yet, and our caller has already | ||
223 | * serialized bind operations.) | ||
224 | */ | ||
225 | BUG_ON(port_user[port] != NULL); | ||
226 | port_user[port] = u; | ||
227 | |||
228 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, | ||
229 | u->name, (void *)(unsigned long)port); | ||
230 | if (rc >= 0) | ||
231 | rc = 0; | ||
232 | |||
233 | return rc; | ||
234 | } | ||
235 | |||
236 | static void evtchn_unbind_from_user(struct per_user_data *u, int port) | ||
237 | { | ||
238 | int irq = irq_from_evtchn(port); | ||
239 | |||
240 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); | ||
241 | |||
242 | /* make sure we unbind the irq handler before clearing the port */ | ||
243 | barrier(); | ||
244 | |||
245 | port_user[port] = NULL; | ||
246 | } | ||
247 | |||
248 | static long evtchn_ioctl(struct file *file, | ||
249 | unsigned int cmd, unsigned long arg) | ||
250 | { | ||
251 | int rc; | ||
252 | struct per_user_data *u = file->private_data; | ||
253 | void __user *uarg = (void __user *) arg; | ||
254 | |||
255 | /* Prevent bind from racing with unbind */ | ||
256 | mutex_lock(&u->bind_mutex); | ||
257 | |||
258 | switch (cmd) { | ||
259 | case IOCTL_EVTCHN_BIND_VIRQ: { | ||
260 | struct ioctl_evtchn_bind_virq bind; | ||
261 | struct evtchn_bind_virq bind_virq; | ||
262 | |||
263 | rc = -EFAULT; | ||
264 | if (copy_from_user(&bind, uarg, sizeof(bind))) | ||
265 | break; | ||
266 | |||
267 | bind_virq.virq = bind.virq; | ||
268 | bind_virq.vcpu = 0; | ||
269 | rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, | ||
270 | &bind_virq); | ||
271 | if (rc != 0) | ||
272 | break; | ||
273 | |||
274 | rc = evtchn_bind_to_user(u, bind_virq.port); | ||
275 | if (rc == 0) | ||
276 | rc = bind_virq.port; | ||
277 | break; | ||
278 | } | ||
279 | |||
280 | case IOCTL_EVTCHN_BIND_INTERDOMAIN: { | ||
281 | struct ioctl_evtchn_bind_interdomain bind; | ||
282 | struct evtchn_bind_interdomain bind_interdomain; | ||
283 | |||
284 | rc = -EFAULT; | ||
285 | if (copy_from_user(&bind, uarg, sizeof(bind))) | ||
286 | break; | ||
287 | |||
288 | bind_interdomain.remote_dom = bind.remote_domain; | ||
289 | bind_interdomain.remote_port = bind.remote_port; | ||
290 | rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, | ||
291 | &bind_interdomain); | ||
292 | if (rc != 0) | ||
293 | break; | ||
294 | |||
295 | rc = evtchn_bind_to_user(u, bind_interdomain.local_port); | ||
296 | if (rc == 0) | ||
297 | rc = bind_interdomain.local_port; | ||
298 | break; | ||
299 | } | ||
300 | |||
301 | case IOCTL_EVTCHN_BIND_UNBOUND_PORT: { | ||
302 | struct ioctl_evtchn_bind_unbound_port bind; | ||
303 | struct evtchn_alloc_unbound alloc_unbound; | ||
304 | |||
305 | rc = -EFAULT; | ||
306 | if (copy_from_user(&bind, uarg, sizeof(bind))) | ||
307 | break; | ||
308 | |||
309 | alloc_unbound.dom = DOMID_SELF; | ||
310 | alloc_unbound.remote_dom = bind.remote_domain; | ||
311 | rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, | ||
312 | &alloc_unbound); | ||
313 | if (rc != 0) | ||
314 | break; | ||
315 | |||
316 | rc = evtchn_bind_to_user(u, alloc_unbound.port); | ||
317 | if (rc == 0) | ||
318 | rc = alloc_unbound.port; | ||
319 | break; | ||
320 | } | ||
321 | |||
322 | case IOCTL_EVTCHN_UNBIND: { | ||
323 | struct ioctl_evtchn_unbind unbind; | ||
324 | |||
325 | rc = -EFAULT; | ||
326 | if (copy_from_user(&unbind, uarg, sizeof(unbind))) | ||
327 | break; | ||
328 | |||
329 | rc = -EINVAL; | ||
330 | if (unbind.port >= NR_EVENT_CHANNELS) | ||
331 | break; | ||
332 | |||
333 | spin_lock_irq(&port_user_lock); | ||
334 | |||
335 | rc = -ENOTCONN; | ||
336 | if (port_user[unbind.port] != u) { | ||
337 | spin_unlock_irq(&port_user_lock); | ||
338 | break; | ||
339 | } | ||
340 | |||
341 | evtchn_unbind_from_user(u, unbind.port); | ||
342 | |||
343 | spin_unlock_irq(&port_user_lock); | ||
344 | |||
345 | rc = 0; | ||
346 | break; | ||
347 | } | ||
348 | |||
349 | case IOCTL_EVTCHN_NOTIFY: { | ||
350 | struct ioctl_evtchn_notify notify; | ||
351 | |||
352 | rc = -EFAULT; | ||
353 | if (copy_from_user(¬ify, uarg, sizeof(notify))) | ||
354 | break; | ||
355 | |||
356 | if (notify.port >= NR_EVENT_CHANNELS) { | ||
357 | rc = -EINVAL; | ||
358 | } else if (port_user[notify.port] != u) { | ||
359 | rc = -ENOTCONN; | ||
360 | } else { | ||
361 | notify_remote_via_evtchn(notify.port); | ||
362 | rc = 0; | ||
363 | } | ||
364 | break; | ||
365 | } | ||
366 | |||
367 | case IOCTL_EVTCHN_RESET: { | ||
368 | /* Initialise the ring to empty. Clear errors. */ | ||
369 | mutex_lock(&u->ring_cons_mutex); | ||
370 | spin_lock_irq(&port_user_lock); | ||
371 | u->ring_cons = u->ring_prod = u->ring_overflow = 0; | ||
372 | spin_unlock_irq(&port_user_lock); | ||
373 | mutex_unlock(&u->ring_cons_mutex); | ||
374 | rc = 0; | ||
375 | break; | ||
376 | } | ||
377 | |||
378 | default: | ||
379 | rc = -ENOSYS; | ||
380 | break; | ||
381 | } | ||
382 | mutex_unlock(&u->bind_mutex); | ||
383 | |||
384 | return rc; | ||
385 | } | ||
386 | |||
387 | static unsigned int evtchn_poll(struct file *file, poll_table *wait) | ||
388 | { | ||
389 | unsigned int mask = POLLOUT | POLLWRNORM; | ||
390 | struct per_user_data *u = file->private_data; | ||
391 | |||
392 | poll_wait(file, &u->evtchn_wait, wait); | ||
393 | if (u->ring_cons != u->ring_prod) | ||
394 | mask |= POLLIN | POLLRDNORM; | ||
395 | if (u->ring_overflow) | ||
396 | mask = POLLERR; | ||
397 | return mask; | ||
398 | } | ||
399 | |||
400 | static int evtchn_fasync(int fd, struct file *filp, int on) | ||
401 | { | ||
402 | struct per_user_data *u = filp->private_data; | ||
403 | return fasync_helper(fd, filp, on, &u->evtchn_async_queue); | ||
404 | } | ||
405 | |||
406 | static int evtchn_open(struct inode *inode, struct file *filp) | ||
407 | { | ||
408 | struct per_user_data *u; | ||
409 | |||
410 | u = kzalloc(sizeof(*u), GFP_KERNEL); | ||
411 | if (u == NULL) | ||
412 | return -ENOMEM; | ||
413 | |||
414 | u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm); | ||
415 | if (u->name == NULL) { | ||
416 | kfree(u); | ||
417 | return -ENOMEM; | ||
418 | } | ||
419 | |||
420 | init_waitqueue_head(&u->evtchn_wait); | ||
421 | |||
422 | u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL); | ||
423 | if (u->ring == NULL) { | ||
424 | kfree(u->name); | ||
425 | kfree(u); | ||
426 | return -ENOMEM; | ||
427 | } | ||
428 | |||
429 | mutex_init(&u->bind_mutex); | ||
430 | mutex_init(&u->ring_cons_mutex); | ||
431 | |||
432 | filp->private_data = u; | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static int evtchn_release(struct inode *inode, struct file *filp) | ||
438 | { | ||
439 | int i; | ||
440 | struct per_user_data *u = filp->private_data; | ||
441 | |||
442 | spin_lock_irq(&port_user_lock); | ||
443 | |||
444 | free_page((unsigned long)u->ring); | ||
445 | |||
446 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | ||
447 | if (port_user[i] != u) | ||
448 | continue; | ||
449 | |||
450 | evtchn_unbind_from_user(port_user[i], i); | ||
451 | } | ||
452 | |||
453 | spin_unlock_irq(&port_user_lock); | ||
454 | |||
455 | kfree(u->name); | ||
456 | kfree(u); | ||
457 | |||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | static const struct file_operations evtchn_fops = { | ||
462 | .owner = THIS_MODULE, | ||
463 | .read = evtchn_read, | ||
464 | .write = evtchn_write, | ||
465 | .unlocked_ioctl = evtchn_ioctl, | ||
466 | .poll = evtchn_poll, | ||
467 | .fasync = evtchn_fasync, | ||
468 | .open = evtchn_open, | ||
469 | .release = evtchn_release, | ||
470 | }; | ||
471 | |||
472 | static struct miscdevice evtchn_miscdev = { | ||
473 | .minor = MISC_DYNAMIC_MINOR, | ||
474 | .name = "evtchn", | ||
475 | .fops = &evtchn_fops, | ||
476 | }; | ||
477 | static int __init evtchn_init(void) | ||
478 | { | ||
479 | int err; | ||
480 | |||
481 | if (!xen_domain()) | ||
482 | return -ENODEV; | ||
483 | |||
484 | spin_lock_init(&port_user_lock); | ||
485 | memset(port_user, 0, sizeof(port_user)); | ||
486 | |||
487 | /* Create '/dev/misc/evtchn'. */ | ||
488 | err = misc_register(&evtchn_miscdev); | ||
489 | if (err != 0) { | ||
490 | printk(KERN_ALERT "Could not register /dev/misc/evtchn\n"); | ||
491 | return err; | ||
492 | } | ||
493 | |||
494 | printk(KERN_INFO "Event-channel device installed.\n"); | ||
495 | |||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | static void __exit evtchn_cleanup(void) | ||
500 | { | ||
501 | misc_deregister(&evtchn_miscdev); | ||
502 | } | ||
503 | |||
504 | module_init(evtchn_init); | ||
505 | module_exit(evtchn_cleanup); | ||
506 | |||
507 | MODULE_LICENSE("GPL"); | ||
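The new driver gives userspace the classic event-channel interface: the ioctls handled in evtchn_ioctl() bind and unbind ports, read() returns a batch of evtchn_port_t values that have fired (their irqs are left disabled), and writing ports back re-enables them. A rough userspace sketch, assuming a userspace copy of the ioctl definitions is visible as <xen/evtchn.h> (the include path is an assumption) and with error handling elided:

    #include <fcntl.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <xen/evtchn.h>         /* assumed userspace copy of the ioctl ABI above */

    int main(void)
    {
            struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
            uint32_t port;
            int fd = open("/dev/xen/evtchn", O_RDWR);

            if (fd < 0)
                    return 1;

            /* allocate an unbound port for the remote domain; the return value is the port */
            port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);

            /* block until the port fires, then write it back to unmask it */
            read(fd, &port, sizeof(port));
            write(fd, &port, sizeof(port));

            close(fd);
            return 0;
    }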
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 4b5b84837ee1..fddc2025dece 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -98,9 +98,8 @@ static void do_suspend(void) | |||
98 | goto out; | 98 | goto out; |
99 | } | 99 | } |
100 | 100 | ||
101 | printk("suspending xenbus...\n"); | 101 | printk(KERN_DEBUG "suspending xenstore...\n"); |
102 | /* XXX use normal device tree? */ | 102 | xs_suspend(); |
103 | xenbus_suspend(); | ||
104 | 103 | ||
105 | err = device_power_down(PMSG_SUSPEND); | 104 | err = device_power_down(PMSG_SUSPEND); |
106 | if (err) { | 105 | if (err) { |
@@ -116,9 +115,9 @@ static void do_suspend(void) | |||
116 | 115 | ||
117 | if (!cancelled) { | 116 | if (!cancelled) { |
118 | xen_arch_resume(); | 117 | xen_arch_resume(); |
119 | xenbus_resume(); | 118 | xs_resume(); |
120 | } else | 119 | } else |
121 | xenbus_suspend_cancel(); | 120 | xs_suspend_cancel(); |
122 | 121 | ||
123 | device_power_up(PMSG_RESUME); | 122 | device_power_up(PMSG_RESUME); |
124 | 123 | ||
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c new file mode 100644 index 000000000000..88a60e03ccf0 --- /dev/null +++ b/drivers/xen/sys-hypervisor.c | |||
@@ -0,0 +1,445 @@ | |||
1 | /* | ||
2 | * copyright (c) 2006 IBM Corporation | ||
3 | * Authored by: Mike D. Day <ncmike@us.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kobject.h> | ||
13 | |||
14 | #include <asm/xen/hypervisor.h> | ||
15 | #include <asm/xen/hypercall.h> | ||
16 | |||
17 | #include <xen/xenbus.h> | ||
18 | #include <xen/interface/xen.h> | ||
19 | #include <xen/interface/version.h> | ||
20 | |||
21 | #define HYPERVISOR_ATTR_RO(_name) \ | ||
22 | static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) | ||
23 | |||
24 | #define HYPERVISOR_ATTR_RW(_name) \ | ||
25 | static struct hyp_sysfs_attr _name##_attr = \ | ||
26 | __ATTR(_name, 0644, _name##_show, _name##_store) | ||
27 | |||
28 | struct hyp_sysfs_attr { | ||
29 | struct attribute attr; | ||
30 | ssize_t (*show)(struct hyp_sysfs_attr *, char *); | ||
31 | ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); | ||
32 | void *hyp_attr_data; | ||
33 | }; | ||
34 | |||
35 | static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
36 | { | ||
37 | return sprintf(buffer, "xen\n"); | ||
38 | } | ||
39 | |||
40 | HYPERVISOR_ATTR_RO(type); | ||
41 | |||
42 | static int __init xen_sysfs_type_init(void) | ||
43 | { | ||
44 | return sysfs_create_file(hypervisor_kobj, &type_attr.attr); | ||
45 | } | ||
46 | |||
47 | static void xen_sysfs_type_destroy(void) | ||
48 | { | ||
49 | sysfs_remove_file(hypervisor_kobj, &type_attr.attr); | ||
50 | } | ||
51 | |||
52 | /* xen version attributes */ | ||
53 | static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
54 | { | ||
55 | int version = HYPERVISOR_xen_version(XENVER_version, NULL); | ||
56 | if (version) | ||
57 | return sprintf(buffer, "%d\n", version >> 16); | ||
58 | return -ENODEV; | ||
59 | } | ||
60 | |||
61 | HYPERVISOR_ATTR_RO(major); | ||
62 | |||
63 | static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
64 | { | ||
65 | int version = HYPERVISOR_xen_version(XENVER_version, NULL); | ||
66 | if (version) | ||
67 | return sprintf(buffer, "%d\n", version & 0xff); | ||
68 | return -ENODEV; | ||
69 | } | ||
70 | |||
71 | HYPERVISOR_ATTR_RO(minor); | ||
72 | |||
73 | static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
74 | { | ||
75 | int ret = -ENOMEM; | ||
76 | char *extra; | ||
77 | |||
78 | extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL); | ||
79 | if (extra) { | ||
80 | ret = HYPERVISOR_xen_version(XENVER_extraversion, extra); | ||
81 | if (!ret) | ||
82 | ret = sprintf(buffer, "%s\n", extra); | ||
83 | kfree(extra); | ||
84 | } | ||
85 | |||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | HYPERVISOR_ATTR_RO(extra); | ||
90 | |||
91 | static struct attribute *version_attrs[] = { | ||
92 | &major_attr.attr, | ||
93 | &minor_attr.attr, | ||
94 | &extra_attr.attr, | ||
95 | NULL | ||
96 | }; | ||
97 | |||
98 | static struct attribute_group version_group = { | ||
99 | .name = "version", | ||
100 | .attrs = version_attrs, | ||
101 | }; | ||
102 | |||
103 | static int __init xen_sysfs_version_init(void) | ||
104 | { | ||
105 | return sysfs_create_group(hypervisor_kobj, &version_group); | ||
106 | } | ||
107 | |||
108 | static void xen_sysfs_version_destroy(void) | ||
109 | { | ||
110 | sysfs_remove_group(hypervisor_kobj, &version_group); | ||
111 | } | ||
112 | |||
113 | /* UUID */ | ||
114 | |||
115 | static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
116 | { | ||
117 | char *vm, *val; | ||
118 | int ret; | ||
119 | extern int xenstored_ready; | ||
120 | |||
121 | if (!xenstored_ready) | ||
122 | return -EBUSY; | ||
123 | |||
124 | vm = xenbus_read(XBT_NIL, "vm", "", NULL); | ||
125 | if (IS_ERR(vm)) | ||
126 | return PTR_ERR(vm); | ||
127 | val = xenbus_read(XBT_NIL, vm, "uuid", NULL); | ||
128 | kfree(vm); | ||
129 | if (IS_ERR(val)) | ||
130 | return PTR_ERR(val); | ||
131 | ret = sprintf(buffer, "%s\n", val); | ||
132 | kfree(val); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | HYPERVISOR_ATTR_RO(uuid); | ||
137 | |||
138 | static int __init xen_sysfs_uuid_init(void) | ||
139 | { | ||
140 | return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr); | ||
141 | } | ||
142 | |||
143 | static void xen_sysfs_uuid_destroy(void) | ||
144 | { | ||
145 | sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr); | ||
146 | } | ||
147 | |||
148 | /* xen compilation attributes */ | ||
149 | |||
150 | static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
151 | { | ||
152 | int ret = -ENOMEM; | ||
153 | struct xen_compile_info *info; | ||
154 | |||
155 | info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); | ||
156 | if (info) { | ||
157 | ret = HYPERVISOR_xen_version(XENVER_compile_info, info); | ||
158 | if (!ret) | ||
159 | ret = sprintf(buffer, "%s\n", info->compiler); | ||
160 | kfree(info); | ||
161 | } | ||
162 | |||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | HYPERVISOR_ATTR_RO(compiler); | ||
167 | |||
168 | static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
169 | { | ||
170 | int ret = -ENOMEM; | ||
171 | struct xen_compile_info *info; | ||
172 | |||
173 | info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); | ||
174 | if (info) { | ||
175 | ret = HYPERVISOR_xen_version(XENVER_compile_info, info); | ||
176 | if (!ret) | ||
177 | ret = sprintf(buffer, "%s\n", info->compile_by); | ||
178 | kfree(info); | ||
179 | } | ||
180 | |||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | HYPERVISOR_ATTR_RO(compiled_by); | ||
185 | |||
186 | static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
187 | { | ||
188 | int ret = -ENOMEM; | ||
189 | struct xen_compile_info *info; | ||
190 | |||
191 | info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); | ||
192 | if (info) { | ||
193 | ret = HYPERVISOR_xen_version(XENVER_compile_info, info); | ||
194 | if (!ret) | ||
195 | ret = sprintf(buffer, "%s\n", info->compile_date); | ||
196 | kfree(info); | ||
197 | } | ||
198 | |||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | HYPERVISOR_ATTR_RO(compile_date); | ||
203 | |||
204 | static struct attribute *xen_compile_attrs[] = { | ||
205 | &compiler_attr.attr, | ||
206 | &compiled_by_attr.attr, | ||
207 | &compile_date_attr.attr, | ||
208 | NULL | ||
209 | }; | ||
210 | |||
211 | static struct attribute_group xen_compilation_group = { | ||
212 | .name = "compilation", | ||
213 | .attrs = xen_compile_attrs, | ||
214 | }; | ||
215 | |||
216 | int __init static xen_compilation_init(void) | ||
217 | { | ||
218 | return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); | ||
219 | } | ||
220 | |||
221 | static void xen_compilation_destroy(void) | ||
222 | { | ||
223 | sysfs_remove_group(hypervisor_kobj, &xen_compilation_group); | ||
224 | } | ||
225 | |||
226 | /* xen properties info */ | ||
227 | |||
228 | static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
229 | { | ||
230 | int ret = -ENOMEM; | ||
231 | char *caps; | ||
232 | |||
233 | caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL); | ||
234 | if (caps) { | ||
235 | ret = HYPERVISOR_xen_version(XENVER_capabilities, caps); | ||
236 | if (!ret) | ||
237 | ret = sprintf(buffer, "%s\n", caps); | ||
238 | kfree(caps); | ||
239 | } | ||
240 | |||
241 | return ret; | ||
242 | } | ||
243 | |||
244 | HYPERVISOR_ATTR_RO(capabilities); | ||
245 | |||
246 | static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
247 | { | ||
248 | int ret = -ENOMEM; | ||
249 | char *cset; | ||
250 | |||
251 | cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL); | ||
252 | if (cset) { | ||
253 | ret = HYPERVISOR_xen_version(XENVER_changeset, cset); | ||
254 | if (!ret) | ||
255 | ret = sprintf(buffer, "%s\n", cset); | ||
256 | kfree(cset); | ||
257 | } | ||
258 | |||
259 | return ret; | ||
260 | } | ||
261 | |||
262 | HYPERVISOR_ATTR_RO(changeset); | ||
263 | |||
264 | static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
265 | { | ||
266 | int ret = -ENOMEM; | ||
267 | struct xen_platform_parameters *parms; | ||
268 | |||
269 | parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL); | ||
270 | if (parms) { | ||
271 | ret = HYPERVISOR_xen_version(XENVER_platform_parameters, | ||
272 | parms); | ||
273 | if (!ret) | ||
274 | ret = sprintf(buffer, "%lx\n", parms->virt_start); | ||
275 | kfree(parms); | ||
276 | } | ||
277 | |||
278 | return ret; | ||
279 | } | ||
280 | |||
281 | HYPERVISOR_ATTR_RO(virtual_start); | ||
282 | |||
283 | static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
284 | { | ||
285 | int ret; | ||
286 | |||
287 | ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL); | ||
288 | if (ret > 0) | ||
289 | ret = sprintf(buffer, "%x\n", ret); | ||
290 | |||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | HYPERVISOR_ATTR_RO(pagesize); | ||
295 | |||
296 | static ssize_t xen_feature_show(int index, char *buffer) | ||
297 | { | ||
298 | ssize_t ret; | ||
299 | struct xen_feature_info info; | ||
300 | |||
301 | info.submap_idx = index; | ||
302 | ret = HYPERVISOR_xen_version(XENVER_get_features, &info); | ||
303 | if (!ret) | ||
304 | ret = sprintf(buffer, "%08x", info.submap); | ||
305 | |||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer) | ||
310 | { | ||
311 | ssize_t len; | ||
312 | int i; | ||
313 | |||
314 | len = 0; | ||
315 | for (i = XENFEAT_NR_SUBMAPS-1; i >= 0; i--) { | ||
316 | int ret = xen_feature_show(i, buffer + len); | ||
317 | if (ret < 0) { | ||
318 | if (len == 0) | ||
319 | len = ret; | ||
320 | break; | ||
321 | } | ||
322 | len += ret; | ||
323 | } | ||
324 | if (len > 0) | ||
325 | buffer[len++] = '\n'; | ||
326 | |||
327 | return len; | ||
328 | } | ||
329 | |||
330 | HYPERVISOR_ATTR_RO(features); | ||
331 | |||
332 | static struct attribute *xen_properties_attrs[] = { | ||
333 | &capabilities_attr.attr, | ||
334 | &changeset_attr.attr, | ||
335 | &virtual_start_attr.attr, | ||
336 | &pagesize_attr.attr, | ||
337 | &features_attr.attr, | ||
338 | NULL | ||
339 | }; | ||
340 | |||
341 | static struct attribute_group xen_properties_group = { | ||
342 | .name = "properties", | ||
343 | .attrs = xen_properties_attrs, | ||
344 | }; | ||
345 | |||
346 | static int __init xen_properties_init(void) | ||
347 | { | ||
348 | return sysfs_create_group(hypervisor_kobj, &xen_properties_group); | ||
349 | } | ||
350 | |||
351 | static void xen_properties_destroy(void) | ||
352 | { | ||
353 | sysfs_remove_group(hypervisor_kobj, &xen_properties_group); | ||
354 | } | ||
355 | |||
356 | static int __init hyper_sysfs_init(void) | ||
357 | { | ||
358 | int ret; | ||
359 | |||
360 | if (!xen_domain()) | ||
361 | return -ENODEV; | ||
362 | |||
363 | ret = xen_sysfs_type_init(); | ||
364 | if (ret) | ||
365 | goto out; | ||
366 | ret = xen_sysfs_version_init(); | ||
367 | if (ret) | ||
368 | goto version_out; | ||
369 | ret = xen_compilation_init(); | ||
370 | if (ret) | ||
371 | goto comp_out; | ||
372 | ret = xen_sysfs_uuid_init(); | ||
373 | if (ret) | ||
374 | goto uuid_out; | ||
375 | ret = xen_properties_init(); | ||
376 | if (ret) | ||
377 | goto prop_out; | ||
378 | |||
379 | goto out; | ||
380 | |||
381 | prop_out: | ||
382 | xen_sysfs_uuid_destroy(); | ||
383 | uuid_out: | ||
384 | xen_compilation_destroy(); | ||
385 | comp_out: | ||
386 | xen_sysfs_version_destroy(); | ||
387 | version_out: | ||
388 | xen_sysfs_type_destroy(); | ||
389 | out: | ||
390 | return ret; | ||
391 | } | ||
392 | |||
393 | static void __exit hyper_sysfs_exit(void) | ||
394 | { | ||
395 | xen_properties_destroy(); | ||
396 | xen_compilation_destroy(); | ||
397 | xen_sysfs_uuid_destroy(); | ||
398 | xen_sysfs_version_destroy(); | ||
399 | xen_sysfs_type_destroy(); | ||
400 | |||
401 | } | ||
402 | module_init(hyper_sysfs_init); | ||
403 | module_exit(hyper_sysfs_exit); | ||
404 | |||
405 | static ssize_t hyp_sysfs_show(struct kobject *kobj, | ||
406 | struct attribute *attr, | ||
407 | char *buffer) | ||
408 | { | ||
409 | struct hyp_sysfs_attr *hyp_attr; | ||
410 | hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr); | ||
411 | if (hyp_attr->show) | ||
412 | return hyp_attr->show(hyp_attr, buffer); | ||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static ssize_t hyp_sysfs_store(struct kobject *kobj, | ||
417 | struct attribute *attr, | ||
418 | const char *buffer, | ||
419 | size_t len) | ||
420 | { | ||
421 | struct hyp_sysfs_attr *hyp_attr; | ||
422 | hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr); | ||
423 | if (hyp_attr->store) | ||
424 | return hyp_attr->store(hyp_attr, buffer, len); | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | static struct sysfs_ops hyp_sysfs_ops = { | ||
429 | .show = hyp_sysfs_show, | ||
430 | .store = hyp_sysfs_store, | ||
431 | }; | ||
432 | |||
433 | static struct kobj_type hyp_sysfs_kobj_type = { | ||
434 | .sysfs_ops = &hyp_sysfs_ops, | ||
435 | }; | ||
436 | |||
437 | static int __init hypervisor_subsys_init(void) | ||
438 | { | ||
439 | if (!xen_domain()) | ||
440 | return -ENODEV; | ||
441 | |||
442 | hypervisor_kobj->ktype = &hyp_sysfs_kobj_type; | ||
443 | return 0; | ||
444 | } | ||
445 | device_initcall(hypervisor_subsys_init); | ||
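Every attribute in the new sys-hypervisor.c follows the same recipe: a show (and optionally store) routine, the HYPERVISOR_ATTR_RO/RW wrapper, and a sysfs_create_file() or sysfs_create_group() call against hypervisor_kobj. A hypothetical extra read-only entry, reusing only the helpers defined above, to show the pattern:

    /* sketch: a hypothetical /sys/hypervisor/guest_type attribute */
    static ssize_t guest_type_show(struct hyp_sysfs_attr *attr, char *buffer)
    {
            return sprintf(buffer, "%s\n",
                           xen_initial_domain() ? "control_d" : "domU");
    }
    HYPERVISOR_ATTR_RO(guest_type);

    static int __init xen_sysfs_guest_type_init(void)
    {
            return sysfs_create_file(hypervisor_kobj, &guest_type_attr.attr);
    }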
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 773d1cf23283..d42e25d5968d 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -71,6 +71,9 @@ static int xenbus_probe_frontend(const char *type, const char *name); | |||
71 | 71 | ||
72 | static void xenbus_dev_shutdown(struct device *_dev); | 72 | static void xenbus_dev_shutdown(struct device *_dev); |
73 | 73 | ||
74 | static int xenbus_dev_suspend(struct device *dev, pm_message_t state); | ||
75 | static int xenbus_dev_resume(struct device *dev); | ||
76 | |||
74 | /* If something in array of ids matches this device, return it. */ | 77 | /* If something in array of ids matches this device, return it. */ |
75 | static const struct xenbus_device_id * | 78 | static const struct xenbus_device_id * |
76 | match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) | 79 | match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) |
@@ -188,6 +191,9 @@ static struct xen_bus_type xenbus_frontend = { | |||
188 | .remove = xenbus_dev_remove, | 191 | .remove = xenbus_dev_remove, |
189 | .shutdown = xenbus_dev_shutdown, | 192 | .shutdown = xenbus_dev_shutdown, |
190 | .dev_attrs = xenbus_dev_attrs, | 193 | .dev_attrs = xenbus_dev_attrs, |
194 | |||
195 | .suspend = xenbus_dev_suspend, | ||
196 | .resume = xenbus_dev_resume, | ||
191 | }, | 197 | }, |
192 | }; | 198 | }; |
193 | 199 | ||
@@ -654,6 +660,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) | |||
654 | 660 | ||
655 | kfree(root); | 661 | kfree(root); |
656 | } | 662 | } |
663 | EXPORT_SYMBOL_GPL(xenbus_dev_changed); | ||
657 | 664 | ||
658 | static void frontend_changed(struct xenbus_watch *watch, | 665 | static void frontend_changed(struct xenbus_watch *watch, |
659 | const char **vec, unsigned int len) | 666 | const char **vec, unsigned int len) |
@@ -669,7 +676,7 @@ static struct xenbus_watch fe_watch = { | |||
669 | .callback = frontend_changed, | 676 | .callback = frontend_changed, |
670 | }; | 677 | }; |
671 | 678 | ||
672 | static int suspend_dev(struct device *dev, void *data) | 679 | static int xenbus_dev_suspend(struct device *dev, pm_message_t state) |
673 | { | 680 | { |
674 | int err = 0; | 681 | int err = 0; |
675 | struct xenbus_driver *drv; | 682 | struct xenbus_driver *drv; |
@@ -682,35 +689,14 @@ static int suspend_dev(struct device *dev, void *data) | |||
682 | drv = to_xenbus_driver(dev->driver); | 689 | drv = to_xenbus_driver(dev->driver); |
683 | xdev = container_of(dev, struct xenbus_device, dev); | 690 | xdev = container_of(dev, struct xenbus_device, dev); |
684 | if (drv->suspend) | 691 | if (drv->suspend) |
685 | err = drv->suspend(xdev); | 692 | err = drv->suspend(xdev, state); |
686 | if (err) | 693 | if (err) |
687 | printk(KERN_WARNING | 694 | printk(KERN_WARNING |
688 | "xenbus: suspend %s failed: %i\n", dev_name(dev), err); | 695 | "xenbus: suspend %s failed: %i\n", dev_name(dev), err); |
689 | return 0; | 696 | return 0; |
690 | } | 697 | } |
691 | 698 | ||
692 | static int suspend_cancel_dev(struct device *dev, void *data) | 699 | static int xenbus_dev_resume(struct device *dev) |
693 | { | ||
694 | int err = 0; | ||
695 | struct xenbus_driver *drv; | ||
696 | struct xenbus_device *xdev; | ||
697 | |||
698 | DPRINTK(""); | ||
699 | |||
700 | if (dev->driver == NULL) | ||
701 | return 0; | ||
702 | drv = to_xenbus_driver(dev->driver); | ||
703 | xdev = container_of(dev, struct xenbus_device, dev); | ||
704 | if (drv->suspend_cancel) | ||
705 | err = drv->suspend_cancel(xdev); | ||
706 | if (err) | ||
707 | printk(KERN_WARNING | ||
708 | "xenbus: suspend_cancel %s failed: %i\n", | ||
709 | dev_name(dev), err); | ||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | static int resume_dev(struct device *dev, void *data) | ||
714 | { | 700 | { |
715 | int err; | 701 | int err; |
716 | struct xenbus_driver *drv; | 702 | struct xenbus_driver *drv; |
@@ -755,33 +741,6 @@ static int resume_dev(struct device *dev, void *data) | |||
755 | return 0; | 741 | return 0; |
756 | } | 742 | } |
757 | 743 | ||
758 | void xenbus_suspend(void) | ||
759 | { | ||
760 | DPRINTK(""); | ||
761 | |||
762 | bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); | ||
763 | xenbus_backend_suspend(suspend_dev); | ||
764 | xs_suspend(); | ||
765 | } | ||
766 | EXPORT_SYMBOL_GPL(xenbus_suspend); | ||
767 | |||
768 | void xenbus_resume(void) | ||
769 | { | ||
770 | xb_init_comms(); | ||
771 | xs_resume(); | ||
772 | bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); | ||
773 | xenbus_backend_resume(resume_dev); | ||
774 | } | ||
775 | EXPORT_SYMBOL_GPL(xenbus_resume); | ||
776 | |||
777 | void xenbus_suspend_cancel(void) | ||
778 | { | ||
779 | xs_suspend_cancel(); | ||
780 | bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); | ||
781 | xenbus_backend_resume(suspend_cancel_dev); | ||
782 | } | ||
783 | EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); | ||
784 | |||
785 | /* A flag to determine if xenstored is 'ready' (i.e. has started) */ | 744 | /* A flag to determine if xenstored is 'ready' (i.e. has started) */ |
786 | int xenstored_ready = 0; | 745 | int xenstored_ready = 0; |
787 | 746 | ||
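With xenbus_suspend()/xenbus_resume()/xenbus_suspend_cancel() removed, per-device suspend and resume now arrive through the bus type's .suspend/.resume hooks registered above, and the xenbus_driver suspend callback gains a pm_message_t argument (drv->suspend(xdev, state)). A minimal frontend callback under the new signature; the driver name is hypothetical:

    /* sketch: a frontend's suspend hook under the new two-argument signature */
    static int examplefront_suspend(struct xenbus_device *dev, pm_message_t state)
    {
            /* quiesce the device here; the xenstore connection itself is
             * suspended/resumed by xs_suspend()/xs_resume() in do_suspend() */
            return 0;
    }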
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index e325eab4724d..eab33f1dbdf7 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
@@ -673,6 +673,8 @@ void xs_resume(void) | |||
673 | struct xenbus_watch *watch; | 673 | struct xenbus_watch *watch; |
674 | char token[sizeof(watch) * 2 + 1]; | 674 | char token[sizeof(watch) * 2 + 1]; |
675 | 675 | ||
676 | xb_init_comms(); | ||
677 | |||
676 | mutex_unlock(&xs_state.response_mutex); | 678 | mutex_unlock(&xs_state.response_mutex); |
677 | mutex_unlock(&xs_state.request_mutex); | 679 | mutex_unlock(&xs_state.request_mutex); |
678 | up_write(&xs_state.transaction_mutex); | 680 | up_write(&xs_state.transaction_mutex); |
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 515741a8e6b8..6559e0c752ce 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c | |||
@@ -20,10 +20,27 @@ | |||
20 | MODULE_DESCRIPTION("Xen filesystem"); | 20 | MODULE_DESCRIPTION("Xen filesystem"); |
21 | MODULE_LICENSE("GPL"); | 21 | MODULE_LICENSE("GPL"); |
22 | 22 | ||
23 | static ssize_t capabilities_read(struct file *file, char __user *buf, | ||
24 | size_t size, loff_t *off) | ||
25 | { | ||
26 | char *tmp = ""; | ||
27 | |||
28 | if (xen_initial_domain()) | ||
29 | tmp = "control_d\n"; | ||
30 | |||
31 | return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp)); | ||
32 | } | ||
33 | |||
34 | static const struct file_operations capabilities_file_ops = { | ||
35 | .read = capabilities_read, | ||
36 | }; | ||
37 | |||
23 | static int xenfs_fill_super(struct super_block *sb, void *data, int silent) | 38 | static int xenfs_fill_super(struct super_block *sb, void *data, int silent) |
24 | { | 39 | { |
25 | static struct tree_descr xenfs_files[] = { | 40 | static struct tree_descr xenfs_files[] = { |
26 | [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR}, | 41 | [1] = {}, |
42 | { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR }, | ||
43 | { "capabilities", &capabilities_file_ops, S_IRUGO }, | ||
27 | {""}, | 44 | {""}, |
28 | }; | 45 | }; |
29 | 46 | ||