diff options
Diffstat (limited to 'drivers/pci')
33 files changed, 1579 insertions, 324 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 34ef70d562b2..5b1630e4e9e3 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
| @@ -40,6 +40,27 @@ config PCI_STUB | |||
| 40 | 40 | ||
| 41 | When in doubt, say N. | 41 | When in doubt, say N. |
| 42 | 42 | ||
| 43 | config XEN_PCIDEV_FRONTEND | ||
| 44 | tristate "Xen PCI Frontend" | ||
| 45 | depends on PCI && X86 && XEN | ||
| 46 | select HOTPLUG | ||
| 47 | select PCI_XEN | ||
| 48 | default y | ||
| 49 | help | ||
| 50 | The PCI device frontend driver allows the kernel to import arbitrary | ||
| 51 | PCI devices from a PCI backend to support PCI driver domains. | ||
| 52 | |||
| 53 | config XEN_PCIDEV_FE_DEBUG | ||
| 54 | bool "Xen PCI Frontend debugging" | ||
| 55 | depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG | ||
| 56 | help | ||
| 57 | Say Y here if you want the Xen PCI frontend to produce a bunch of debug | ||
| 58 | messages to the system log. Select this if you are having a | ||
| 59 | problem with Xen PCI frontend support and want to see more of what is | ||
| 60 | going on. | ||
| 61 | |||
| 62 | When in doubt, say N. | ||
| 63 | |||
| 43 | config HT_IRQ | 64 | config HT_IRQ |
| 44 | bool "Interrupts on hypertransport devices" | 65 | bool "Interrupts on hypertransport devices" |
| 45 | default y | 66 | default y |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index dc1aa0922868..f01e344cf4bd 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
| @@ -65,6 +65,6 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o | |||
| 65 | 65 | ||
| 66 | obj-$(CONFIG_PCI_STUB) += pci-stub.o | 66 | obj-$(CONFIG_PCI_STUB) += pci-stub.o |
| 67 | 67 | ||
| 68 | ifeq ($(CONFIG_PCI_DEBUG),y) | 68 | obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o |
| 69 | EXTRA_CFLAGS += -DDEBUG | 69 | |
| 70 | endif | 70 | ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 7f0af0e9b826..003170ea2e39 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
| @@ -64,6 +64,77 @@ void pci_bus_remove_resources(struct pci_bus *bus) | |||
| 64 | } | 64 | } |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static bool pci_bus_resource_better(struct resource *res1, bool pos1, | ||
| 68 | struct resource *res2, bool pos2) | ||
| 69 | { | ||
| 70 | /* If exactly one is positive decode, always prefer that one */ | ||
| 71 | if (pos1 != pos2) | ||
| 72 | return pos1 ? true : false; | ||
| 73 | |||
| 74 | /* Prefer the one that contains the highest address */ | ||
| 75 | if (res1->end != res2->end) | ||
| 76 | return (res1->end > res2->end) ? true : false; | ||
| 77 | |||
| 78 | /* Otherwise, prefer the one with highest "center of gravity" */ | ||
| 79 | if (res1->start != res2->start) | ||
| 80 | return (res1->start > res2->start) ? true : false; | ||
| 81 | |||
| 82 | /* Otherwise, choose one arbitrarily (but consistently) */ | ||
| 83 | return (res1 > res2) ? true : false; | ||
| 84 | } | ||
| 85 | |||
| 86 | static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res) | ||
| 87 | { | ||
| 88 | struct pci_bus_resource *bus_res; | ||
| 89 | |||
| 90 | /* | ||
| 91 | * This relies on the fact that pci_bus.resource[] refers to P2P or | ||
| 92 | * CardBus bridge base/limit registers, which are always positively | ||
| 93 | * decoded. The pci_bus.resources list contains host bridge or | ||
| 94 | * subtractively decoded resources. | ||
| 95 | */ | ||
| 96 | list_for_each_entry(bus_res, &bus->resources, list) { | ||
| 97 | if (bus_res->res == res) | ||
| 98 | return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ? | ||
| 99 | false : true; | ||
| 100 | } | ||
| 101 | return true; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Find the next-best bus resource after the cursor "res". If the cursor is | ||
| 106 | * NULL, return the best resource. "Best" means that we prefer positive | ||
| 107 | * decode regions over subtractive decode, then those at higher addresses. | ||
| 108 | */ | ||
| 109 | static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, | ||
| 110 | unsigned int type, | ||
| 111 | struct resource *res) | ||
| 112 | { | ||
| 113 | bool res_pos, r_pos, prev_pos = false; | ||
| 114 | struct resource *r, *prev = NULL; | ||
| 115 | int i; | ||
| 116 | |||
| 117 | res_pos = pci_bus_resource_positive(bus, res); | ||
| 118 | pci_bus_for_each_resource(bus, r, i) { | ||
| 119 | if (!r) | ||
| 120 | continue; | ||
| 121 | |||
| 122 | if ((r->flags & IORESOURCE_TYPE_BITS) != type) | ||
| 123 | continue; | ||
| 124 | |||
| 125 | r_pos = pci_bus_resource_positive(bus, r); | ||
| 126 | if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) { | ||
| 127 | if (!prev || pci_bus_resource_better(r, r_pos, | ||
| 128 | prev, prev_pos)) { | ||
| 129 | prev = r; | ||
| 130 | prev_pos = r_pos; | ||
| 131 | } | ||
| 132 | } | ||
| 133 | } | ||
| 134 | |||
| 135 | return prev; | ||
| 136 | } | ||
| 137 | |||
| 67 | /** | 138 | /** |
| 68 | * pci_bus_alloc_resource - allocate a resource from a parent bus | 139 | * pci_bus_alloc_resource - allocate a resource from a parent bus |
| 69 | * @bus: PCI bus | 140 | * @bus: PCI bus |
| @@ -89,9 +160,10 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
| 89 | resource_size_t), | 160 | resource_size_t), |
| 90 | void *alignf_data) | 161 | void *alignf_data) |
| 91 | { | 162 | { |
| 92 | int i, ret = -ENOMEM; | 163 | int ret = -ENOMEM; |
| 93 | struct resource *r; | 164 | struct resource *r; |
| 94 | resource_size_t max = -1; | 165 | resource_size_t max = -1; |
| 166 | unsigned int type = res->flags & IORESOURCE_TYPE_BITS; | ||
| 95 | 167 | ||
| 96 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; | 168 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; |
| 97 | 169 | ||
| @@ -99,10 +171,9 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
| 99 | if (!(res->flags & IORESOURCE_MEM_64)) | 171 | if (!(res->flags & IORESOURCE_MEM_64)) |
| 100 | max = PCIBIOS_MAX_MEM_32; | 172 | max = PCIBIOS_MAX_MEM_32; |
| 101 | 173 | ||
| 102 | pci_bus_for_each_resource(bus, r, i) { | 174 | /* Look for space at highest addresses first */ |
| 103 | if (!r) | 175 | r = pci_bus_find_resource_prev(bus, type, NULL); |
| 104 | continue; | 176 | for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) { |
| 105 | |||
| 106 | /* type_mask must match */ | 177 | /* type_mask must match */ |
| 107 | if ((res->flags ^ r->flags) & type_mask) | 178 | if ((res->flags ^ r->flags) & type_mask) |
| 108 | continue; | 179 | continue; |
| @@ -299,6 +370,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), | |||
| 299 | } | 370 | } |
| 300 | up_read(&pci_bus_sem); | 371 | up_read(&pci_bus_sem); |
| 301 | } | 372 | } |
| 373 | EXPORT_SYMBOL_GPL(pci_walk_bus); | ||
| 302 | 374 | ||
| 303 | EXPORT_SYMBOL(pci_bus_alloc_resource); | 375 | EXPORT_SYMBOL(pci_bus_alloc_resource); |
| 304 | EXPORT_SYMBOL_GPL(pci_bus_add_device); | 376 | EXPORT_SYMBOL_GPL(pci_bus_add_device); |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0a19708074c2..0157708d474d 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/tboot.h> | 36 | #include <linux/tboot.h> |
| 37 | #include <linux/dmi.h> | 37 | #include <linux/dmi.h> |
| 38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
| 39 | #include <asm/iommu_table.h> | ||
| 39 | 40 | ||
| 40 | #define PREFIX "DMAR: " | 41 | #define PREFIX "DMAR: " |
| 41 | 42 | ||
| @@ -687,7 +688,7 @@ failed: | |||
| 687 | return 0; | 688 | return 0; |
| 688 | } | 689 | } |
| 689 | 690 | ||
| 690 | void __init detect_intel_iommu(void) | 691 | int __init detect_intel_iommu(void) |
| 691 | { | 692 | { |
| 692 | int ret; | 693 | int ret; |
| 693 | 694 | ||
| @@ -723,6 +724,8 @@ void __init detect_intel_iommu(void) | |||
| 723 | } | 724 | } |
| 724 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); | 725 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); |
| 725 | dmar_tbl = NULL; | 726 | dmar_tbl = NULL; |
| 727 | |||
| 728 | return ret ? 1 : -ENODEV; | ||
| 726 | } | 729 | } |
| 727 | 730 | ||
| 728 | 731 | ||
| @@ -1221,9 +1224,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | |||
| 1221 | } | 1224 | } |
| 1222 | } | 1225 | } |
| 1223 | 1226 | ||
| 1224 | void dmar_msi_unmask(unsigned int irq) | 1227 | void dmar_msi_unmask(struct irq_data *data) |
| 1225 | { | 1228 | { |
| 1226 | struct intel_iommu *iommu = get_irq_data(irq); | 1229 | struct intel_iommu *iommu = irq_data_get_irq_data(data); |
| 1227 | unsigned long flag; | 1230 | unsigned long flag; |
| 1228 | 1231 | ||
| 1229 | /* unmask it */ | 1232 | /* unmask it */ |
| @@ -1234,10 +1237,10 @@ void dmar_msi_unmask(unsigned int irq) | |||
| 1234 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1237 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1235 | } | 1238 | } |
| 1236 | 1239 | ||
| 1237 | void dmar_msi_mask(unsigned int irq) | 1240 | void dmar_msi_mask(struct irq_data *data) |
| 1238 | { | 1241 | { |
| 1239 | unsigned long flag; | 1242 | unsigned long flag; |
| 1240 | struct intel_iommu *iommu = get_irq_data(irq); | 1243 | struct intel_iommu *iommu = irq_data_get_irq_data(data); |
| 1241 | 1244 | ||
| 1242 | /* mask it */ | 1245 | /* mask it */ |
| 1243 | spin_lock_irqsave(&iommu->register_lock, flag); | 1246 | spin_lock_irqsave(&iommu->register_lock, flag); |
| @@ -1455,3 +1458,4 @@ int __init dmar_ir_support(void) | |||
| 1455 | return 0; | 1458 | return 0; |
| 1456 | return dmar->flags & 0x1; | 1459 | return dmar->flags & 0x1; |
| 1457 | } | 1460 | } |
| 1461 | IOMMU_INIT_POST(detect_intel_iommu); | ||
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c index 56215322930a..4cb30447a486 100644 --- a/drivers/pci/hotplug/cpqphp_sysfs.c +++ b/drivers/pci/hotplug/cpqphp_sysfs.c | |||
| @@ -34,10 +34,11 @@ | |||
| 34 | #include <linux/workqueue.h> | 34 | #include <linux/workqueue.h> |
| 35 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
| 36 | #include <linux/pci_hotplug.h> | 36 | #include <linux/pci_hotplug.h> |
| 37 | #include <linux/smp_lock.h> | 37 | #include <linux/mutex.h> |
| 38 | #include <linux/debugfs.h> | 38 | #include <linux/debugfs.h> |
| 39 | #include "cpqphp.h" | 39 | #include "cpqphp.h" |
| 40 | 40 | ||
| 41 | static DEFINE_MUTEX(cpqphp_mutex); | ||
| 41 | static int show_ctrl (struct controller *ctrl, char *buf) | 42 | static int show_ctrl (struct controller *ctrl, char *buf) |
| 42 | { | 43 | { |
| 43 | char *out = buf; | 44 | char *out = buf; |
| @@ -147,7 +148,7 @@ static int open(struct inode *inode, struct file *file) | |||
| 147 | struct ctrl_dbg *dbg; | 148 | struct ctrl_dbg *dbg; |
| 148 | int retval = -ENOMEM; | 149 | int retval = -ENOMEM; |
| 149 | 150 | ||
| 150 | lock_kernel(); | 151 | mutex_lock(&cpqphp_mutex); |
| 151 | dbg = kmalloc(sizeof(*dbg), GFP_KERNEL); | 152 | dbg = kmalloc(sizeof(*dbg), GFP_KERNEL); |
| 152 | if (!dbg) | 153 | if (!dbg) |
| 153 | goto exit; | 154 | goto exit; |
| @@ -160,7 +161,7 @@ static int open(struct inode *inode, struct file *file) | |||
| 160 | file->private_data = dbg; | 161 | file->private_data = dbg; |
| 161 | retval = 0; | 162 | retval = 0; |
| 162 | exit: | 163 | exit: |
| 163 | unlock_kernel(); | 164 | mutex_unlock(&cpqphp_mutex); |
| 164 | return retval; | 165 | return retval; |
| 165 | } | 166 | } |
| 166 | 167 | ||
| @@ -169,7 +170,7 @@ static loff_t lseek(struct file *file, loff_t off, int whence) | |||
| 169 | struct ctrl_dbg *dbg; | 170 | struct ctrl_dbg *dbg; |
| 170 | loff_t new = -1; | 171 | loff_t new = -1; |
| 171 | 172 | ||
| 172 | lock_kernel(); | 173 | mutex_lock(&cpqphp_mutex); |
| 173 | dbg = file->private_data; | 174 | dbg = file->private_data; |
| 174 | 175 | ||
| 175 | switch (whence) { | 176 | switch (whence) { |
| @@ -181,10 +182,10 @@ static loff_t lseek(struct file *file, loff_t off, int whence) | |||
| 181 | break; | 182 | break; |
| 182 | } | 183 | } |
| 183 | if (new < 0 || new > dbg->size) { | 184 | if (new < 0 || new > dbg->size) { |
| 184 | unlock_kernel(); | 185 | mutex_unlock(&cpqphp_mutex); |
| 185 | return -EINVAL; | 186 | return -EINVAL; |
| 186 | } | 187 | } |
| 187 | unlock_kernel(); | 188 | mutex_unlock(&cpqphp_mutex); |
| 188 | return (file->f_pos = new); | 189 | return (file->f_pos = new); |
| 189 | } | 190 | } |
| 190 | 191 | ||
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 5becbdee4027..2850e64dedae 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
| @@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void) | |||
| 276 | 276 | ||
| 277 | for (;;) { | 277 | for (;;) { |
| 278 | offset = next_offset; | 278 | offset = next_offset; |
| 279 | |||
| 280 | /* Make sure what we read is still in the mapped section */ | ||
| 281 | if (WARN(offset > (ebda_sz * 1024 - 4), | ||
| 282 | "ibmphp_ebda: next read is beyond ebda_sz\n")) | ||
| 283 | break; | ||
| 284 | |||
| 279 | next_offset = readw (io_mem + offset); /* offset of next blk */ | 285 | next_offset = readw (io_mem + offset); /* offset of next blk */ |
| 280 | 286 | ||
| 281 | offset += 2; | 287 | offset += 2; |
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index 1aaf3f32d3cd..f59ed30512b5 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c | |||
| @@ -133,8 +133,8 @@ void __init ibmphp_hpc_initvars (void) | |||
| 133 | debug ("%s - Entry\n", __func__); | 133 | debug ("%s - Entry\n", __func__); |
| 134 | 134 | ||
| 135 | mutex_init(&sem_hpcaccess); | 135 | mutex_init(&sem_hpcaccess); |
| 136 | init_MUTEX (&semOperations); | 136 | sema_init(&semOperations, 1); |
| 137 | init_MUTEX_LOCKED (&sem_exit); | 137 | sema_init(&sem_exit, 0); |
| 138 | to_debug = 0; | 138 | to_debug = 0; |
| 139 | 139 | ||
| 140 | debug ("%s - Exit\n", __func__); | 140 | debug ("%s - Exit\n", __func__); |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 73d513989263..838f571027b7 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/sched.h> /* signal_pending() */ | 36 | #include <linux/sched.h> /* signal_pending() */ |
| 37 | #include <linux/pcieport_if.h> | 37 | #include <linux/pcieport_if.h> |
| 38 | #include <linux/mutex.h> | 38 | #include <linux/mutex.h> |
| 39 | #include <linux/workqueue.h> | ||
| 39 | 40 | ||
| 40 | #define MY_NAME "pciehp" | 41 | #define MY_NAME "pciehp" |
| 41 | 42 | ||
| @@ -44,6 +45,7 @@ extern int pciehp_poll_time; | |||
| 44 | extern int pciehp_debug; | 45 | extern int pciehp_debug; |
| 45 | extern int pciehp_force; | 46 | extern int pciehp_force; |
| 46 | extern struct workqueue_struct *pciehp_wq; | 47 | extern struct workqueue_struct *pciehp_wq; |
| 48 | extern struct workqueue_struct *pciehp_ordered_wq; | ||
| 47 | 49 | ||
| 48 | #define dbg(format, arg...) \ | 50 | #define dbg(format, arg...) \ |
| 49 | do { \ | 51 | do { \ |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index aa5f3ff629ff..7ac8358df8fd 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
| @@ -43,6 +43,7 @@ int pciehp_poll_mode; | |||
| 43 | int pciehp_poll_time; | 43 | int pciehp_poll_time; |
| 44 | int pciehp_force; | 44 | int pciehp_force; |
| 45 | struct workqueue_struct *pciehp_wq; | 45 | struct workqueue_struct *pciehp_wq; |
| 46 | struct workqueue_struct *pciehp_ordered_wq; | ||
| 46 | 47 | ||
| 47 | #define DRIVER_VERSION "0.4" | 48 | #define DRIVER_VERSION "0.4" |
| 48 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 49 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
| @@ -340,18 +341,33 @@ static int __init pcied_init(void) | |||
| 340 | { | 341 | { |
| 341 | int retval = 0; | 342 | int retval = 0; |
| 342 | 343 | ||
| 344 | pciehp_wq = alloc_workqueue("pciehp", 0, 0); | ||
| 345 | if (!pciehp_wq) | ||
| 346 | return -ENOMEM; | ||
| 347 | |||
| 348 | pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0); | ||
| 349 | if (!pciehp_ordered_wq) { | ||
| 350 | destroy_workqueue(pciehp_wq); | ||
| 351 | return -ENOMEM; | ||
| 352 | } | ||
| 353 | |||
| 343 | pciehp_firmware_init(); | 354 | pciehp_firmware_init(); |
| 344 | retval = pcie_port_service_register(&hpdriver_portdrv); | 355 | retval = pcie_port_service_register(&hpdriver_portdrv); |
| 345 | dbg("pcie_port_service_register = %d\n", retval); | 356 | dbg("pcie_port_service_register = %d\n", retval); |
| 346 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | 357 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); |
| 347 | if (retval) | 358 | if (retval) { |
| 359 | destroy_workqueue(pciehp_ordered_wq); | ||
| 360 | destroy_workqueue(pciehp_wq); | ||
| 348 | dbg("Failure to register service\n"); | 361 | dbg("Failure to register service\n"); |
| 362 | } | ||
| 349 | return retval; | 363 | return retval; |
| 350 | } | 364 | } |
| 351 | 365 | ||
| 352 | static void __exit pcied_cleanup(void) | 366 | static void __exit pcied_cleanup(void) |
| 353 | { | 367 | { |
| 354 | dbg("unload_pciehpd()\n"); | 368 | dbg("unload_pciehpd()\n"); |
| 369 | destroy_workqueue(pciehp_ordered_wq); | ||
| 370 | destroy_workqueue(pciehp_wq); | ||
| 355 | pcie_port_service_unregister(&hpdriver_portdrv); | 371 | pcie_port_service_unregister(&hpdriver_portdrv); |
| 356 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); | 372 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); |
| 357 | } | 373 | } |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 8f58148be044..085dbb5fc168 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
| 33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
| 34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
| 35 | #include <linux/workqueue.h> | ||
| 36 | #include "../pci.h" | 35 | #include "../pci.h" |
| 37 | #include "pciehp.h" | 36 | #include "pciehp.h" |
| 38 | 37 | ||
| @@ -50,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
| 50 | info->p_slot = p_slot; | 49 | info->p_slot = p_slot; |
| 51 | INIT_WORK(&info->work, interrupt_event_handler); | 50 | INIT_WORK(&info->work, interrupt_event_handler); |
| 52 | 51 | ||
| 53 | schedule_work(&info->work); | 52 | queue_work(pciehp_wq, &info->work); |
| 54 | 53 | ||
| 55 | return 0; | 54 | return 0; |
| 56 | } | 55 | } |
| @@ -345,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
| 345 | kfree(info); | 344 | kfree(info); |
| 346 | goto out; | 345 | goto out; |
| 347 | } | 346 | } |
| 348 | queue_work(pciehp_wq, &info->work); | 347 | queue_work(pciehp_ordered_wq, &info->work); |
| 349 | out: | 348 | out: |
| 350 | mutex_unlock(&p_slot->lock); | 349 | mutex_unlock(&p_slot->lock); |
| 351 | } | 350 | } |
| @@ -378,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
| 378 | if (ATTN_LED(ctrl)) | 377 | if (ATTN_LED(ctrl)) |
| 379 | pciehp_set_attention_status(p_slot, 0); | 378 | pciehp_set_attention_status(p_slot, 0); |
| 380 | 379 | ||
| 381 | schedule_delayed_work(&p_slot->work, 5*HZ); | 380 | queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); |
| 382 | break; | 381 | break; |
| 383 | case BLINKINGOFF_STATE: | 382 | case BLINKINGOFF_STATE: |
| 384 | case BLINKINGON_STATE: | 383 | case BLINKINGON_STATE: |
| @@ -440,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot) | |||
| 440 | else | 439 | else |
| 441 | p_slot->state = POWERON_STATE; | 440 | p_slot->state = POWERON_STATE; |
| 442 | 441 | ||
| 443 | queue_work(pciehp_wq, &info->work); | 442 | queue_work(pciehp_ordered_wq, &info->work); |
| 444 | } | 443 | } |
| 445 | 444 | ||
| 446 | static void interrupt_event_handler(struct work_struct *work) | 445 | static void interrupt_event_handler(struct work_struct *work) |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 0cd42047d89b..50a23da5d24d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -41,8 +41,6 @@ | |||
| 41 | #include "../pci.h" | 41 | #include "../pci.h" |
| 42 | #include "pciehp.h" | 42 | #include "pciehp.h" |
| 43 | 43 | ||
| 44 | static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); | ||
| 45 | |||
| 46 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) | 44 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) |
| 47 | { | 45 | { |
| 48 | struct pci_dev *dev = ctrl->pcie->port; | 46 | struct pci_dev *dev = ctrl->pcie->port; |
| @@ -805,8 +803,8 @@ static void pcie_cleanup_slot(struct controller *ctrl) | |||
| 805 | { | 803 | { |
| 806 | struct slot *slot = ctrl->slot; | 804 | struct slot *slot = ctrl->slot; |
| 807 | cancel_delayed_work(&slot->work); | 805 | cancel_delayed_work(&slot->work); |
| 808 | flush_scheduled_work(); | ||
| 809 | flush_workqueue(pciehp_wq); | 806 | flush_workqueue(pciehp_wq); |
| 807 | flush_workqueue(pciehp_ordered_wq); | ||
| 810 | kfree(slot); | 808 | kfree(slot); |
| 811 | } | 809 | } |
| 812 | 810 | ||
| @@ -912,16 +910,6 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
| 912 | /* Disable sotfware notification */ | 910 | /* Disable sotfware notification */ |
| 913 | pcie_disable_notification(ctrl); | 911 | pcie_disable_notification(ctrl); |
| 914 | 912 | ||
| 915 | /* | ||
| 916 | * If this is the first controller to be initialized, | ||
| 917 | * initialize the pciehp work queue | ||
| 918 | */ | ||
| 919 | if (atomic_add_return(1, &pciehp_num_controllers) == 1) { | ||
| 920 | pciehp_wq = create_singlethread_workqueue("pciehpd"); | ||
| 921 | if (!pciehp_wq) | ||
| 922 | goto abort_ctrl; | ||
| 923 | } | ||
| 924 | |||
| 925 | ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", | 913 | ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", |
| 926 | pdev->vendor, pdev->device, pdev->subsystem_vendor, | 914 | pdev->vendor, pdev->device, pdev->subsystem_vendor, |
| 927 | pdev->subsystem_device); | 915 | pdev->subsystem_device); |
| @@ -941,11 +929,5 @@ void pciehp_release_ctrl(struct controller *ctrl) | |||
| 941 | { | 929 | { |
| 942 | pcie_shutdown_notification(ctrl); | 930 | pcie_shutdown_notification(ctrl); |
| 943 | pcie_cleanup_slot(ctrl); | 931 | pcie_cleanup_slot(ctrl); |
| 944 | /* | ||
| 945 | * If this is the last controller to be released, destroy the | ||
| 946 | * pciehp work queue | ||
| 947 | */ | ||
| 948 | if (atomic_dec_and_test(&pciehp_num_controllers)) | ||
| 949 | destroy_workqueue(pciehp_wq); | ||
| 950 | kfree(ctrl); | 932 | kfree(ctrl); |
| 951 | } | 933 | } |
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index d2627e1c3ac1..e0c90e643b5f 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
| 36 | #include <linux/sched.h> /* signal_pending(), struct timer_list */ | 36 | #include <linux/sched.h> /* signal_pending(), struct timer_list */ |
| 37 | #include <linux/mutex.h> | 37 | #include <linux/mutex.h> |
| 38 | #include <linux/workqueue.h> | ||
| 38 | 39 | ||
| 39 | #if !defined(MODULE) | 40 | #if !defined(MODULE) |
| 40 | #define MY_NAME "shpchp" | 41 | #define MY_NAME "shpchp" |
| @@ -46,6 +47,7 @@ extern int shpchp_poll_mode; | |||
| 46 | extern int shpchp_poll_time; | 47 | extern int shpchp_poll_time; |
| 47 | extern int shpchp_debug; | 48 | extern int shpchp_debug; |
| 48 | extern struct workqueue_struct *shpchp_wq; | 49 | extern struct workqueue_struct *shpchp_wq; |
| 50 | extern struct workqueue_struct *shpchp_ordered_wq; | ||
| 49 | 51 | ||
| 50 | #define dbg(format, arg...) \ | 52 | #define dbg(format, arg...) \ |
| 51 | do { \ | 53 | do { \ |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index a7bd5048396e..aca972bbfb4c 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
| 34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 35 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
| 36 | #include <linux/workqueue.h> | ||
| 37 | #include "shpchp.h" | 36 | #include "shpchp.h" |
| 38 | 37 | ||
| 39 | /* Global variables */ | 38 | /* Global variables */ |
| @@ -41,6 +40,7 @@ int shpchp_debug; | |||
| 41 | int shpchp_poll_mode; | 40 | int shpchp_poll_mode; |
| 42 | int shpchp_poll_time; | 41 | int shpchp_poll_time; |
| 43 | struct workqueue_struct *shpchp_wq; | 42 | struct workqueue_struct *shpchp_wq; |
| 43 | struct workqueue_struct *shpchp_ordered_wq; | ||
| 44 | 44 | ||
| 45 | #define DRIVER_VERSION "0.4" | 45 | #define DRIVER_VERSION "0.4" |
| 46 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 46 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
| @@ -174,8 +174,8 @@ void cleanup_slots(struct controller *ctrl) | |||
| 174 | slot = list_entry(tmp, struct slot, slot_list); | 174 | slot = list_entry(tmp, struct slot, slot_list); |
| 175 | list_del(&slot->slot_list); | 175 | list_del(&slot->slot_list); |
| 176 | cancel_delayed_work(&slot->work); | 176 | cancel_delayed_work(&slot->work); |
| 177 | flush_scheduled_work(); | ||
| 178 | flush_workqueue(shpchp_wq); | 177 | flush_workqueue(shpchp_wq); |
| 178 | flush_workqueue(shpchp_ordered_wq); | ||
| 179 | pci_hp_deregister(slot->hotplug_slot); | 179 | pci_hp_deregister(slot->hotplug_slot); |
| 180 | } | 180 | } |
| 181 | } | 181 | } |
| @@ -360,9 +360,23 @@ static int __init shpcd_init(void) | |||
| 360 | { | 360 | { |
| 361 | int retval = 0; | 361 | int retval = 0; |
| 362 | 362 | ||
| 363 | shpchp_wq = alloc_ordered_workqueue("shpchp", 0); | ||
| 364 | if (!shpchp_wq) | ||
| 365 | return -ENOMEM; | ||
| 366 | |||
| 367 | shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); | ||
| 368 | if (!shpchp_ordered_wq) { | ||
| 369 | destroy_workqueue(shpchp_wq); | ||
| 370 | return -ENOMEM; | ||
| 371 | } | ||
| 372 | |||
| 363 | retval = pci_register_driver(&shpc_driver); | 373 | retval = pci_register_driver(&shpc_driver); |
| 364 | dbg("%s: pci_register_driver = %d\n", __func__, retval); | 374 | dbg("%s: pci_register_driver = %d\n", __func__, retval); |
| 365 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | 375 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); |
| 376 | if (retval) { | ||
| 377 | destroy_workqueue(shpchp_ordered_wq); | ||
| 378 | destroy_workqueue(shpchp_wq); | ||
| 379 | } | ||
| 366 | return retval; | 380 | return retval; |
| 367 | } | 381 | } |
| 368 | 382 | ||
| @@ -370,6 +384,8 @@ static void __exit shpcd_cleanup(void) | |||
| 370 | { | 384 | { |
| 371 | dbg("unload_shpchpd()\n"); | 385 | dbg("unload_shpchpd()\n"); |
| 372 | pci_unregister_driver(&shpc_driver); | 386 | pci_unregister_driver(&shpc_driver); |
| 387 | destroy_workqueue(shpchp_ordered_wq); | ||
| 388 | destroy_workqueue(shpchp_wq); | ||
| 373 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); | 389 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); |
| 374 | } | 390 | } |
| 375 | 391 | ||
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 3387fbfb0c54..b00b09bdd38a 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
| 33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
| 34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
| 35 | #include <linux/workqueue.h> | ||
| 36 | #include "../pci.h" | 35 | #include "../pci.h" |
| 37 | #include "shpchp.h" | 36 | #include "shpchp.h" |
| 38 | 37 | ||
| @@ -52,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
| 52 | info->p_slot = p_slot; | 51 | info->p_slot = p_slot; |
| 53 | INIT_WORK(&info->work, interrupt_event_handler); | 52 | INIT_WORK(&info->work, interrupt_event_handler); |
| 54 | 53 | ||
| 55 | schedule_work(&info->work); | 54 | queue_work(shpchp_wq, &info->work); |
| 56 | 55 | ||
| 57 | return 0; | 56 | return 0; |
| 58 | } | 57 | } |
| @@ -457,7 +456,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) | |||
| 457 | kfree(info); | 456 | kfree(info); |
| 458 | goto out; | 457 | goto out; |
| 459 | } | 458 | } |
| 460 | queue_work(shpchp_wq, &info->work); | 459 | queue_work(shpchp_ordered_wq, &info->work); |
| 461 | out: | 460 | out: |
| 462 | mutex_unlock(&p_slot->lock); | 461 | mutex_unlock(&p_slot->lock); |
| 463 | } | 462 | } |
| @@ -505,7 +504,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
| 505 | p_slot->hpc_ops->green_led_blink(p_slot); | 504 | p_slot->hpc_ops->green_led_blink(p_slot); |
| 506 | p_slot->hpc_ops->set_attention_status(p_slot, 0); | 505 | p_slot->hpc_ops->set_attention_status(p_slot, 0); |
| 507 | 506 | ||
| 508 | schedule_delayed_work(&p_slot->work, 5*HZ); | 507 | queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); |
| 509 | break; | 508 | break; |
| 510 | case BLINKINGOFF_STATE: | 509 | case BLINKINGOFF_STATE: |
| 511 | case BLINKINGON_STATE: | 510 | case BLINKINGON_STATE: |
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index d3985e7deab7..36547f0ce305 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c | |||
| @@ -179,8 +179,6 @@ | |||
| 179 | #define SLOT_EVENT_LATCH 0x2 | 179 | #define SLOT_EVENT_LATCH 0x2 |
| 180 | #define SLOT_SERR_INT_MASK 0x3 | 180 | #define SLOT_SERR_INT_MASK 0x3 |
| 181 | 181 | ||
| 182 | static atomic_t shpchp_num_controllers = ATOMIC_INIT(0); | ||
| 183 | |||
| 184 | static irqreturn_t shpc_isr(int irq, void *dev_id); | 182 | static irqreturn_t shpc_isr(int irq, void *dev_id); |
| 185 | static void start_int_poll_timer(struct controller *ctrl, int sec); | 183 | static void start_int_poll_timer(struct controller *ctrl, int sec); |
| 186 | static int hpc_check_cmd_status(struct controller *ctrl); | 184 | static int hpc_check_cmd_status(struct controller *ctrl); |
| @@ -614,13 +612,6 @@ static void hpc_release_ctlr(struct controller *ctrl) | |||
| 614 | 612 | ||
| 615 | iounmap(ctrl->creg); | 613 | iounmap(ctrl->creg); |
| 616 | release_mem_region(ctrl->mmio_base, ctrl->mmio_size); | 614 | release_mem_region(ctrl->mmio_base, ctrl->mmio_size); |
| 617 | |||
| 618 | /* | ||
| 619 | * If this is the last controller to be released, destroy the | ||
| 620 | * shpchpd work queue | ||
| 621 | */ | ||
| 622 | if (atomic_dec_and_test(&shpchp_num_controllers)) | ||
| 623 | destroy_workqueue(shpchp_wq); | ||
| 624 | } | 615 | } |
| 625 | 616 | ||
| 626 | static int hpc_power_on_slot(struct slot * slot) | 617 | static int hpc_power_on_slot(struct slot * slot) |
| @@ -1077,9 +1068,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
| 1077 | 1068 | ||
| 1078 | rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, | 1069 | rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, |
| 1079 | MY_NAME, (void *)ctrl); | 1070 | MY_NAME, (void *)ctrl); |
| 1080 | ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n", | 1071 | ctrl_dbg(ctrl, "request_irq %d (returns %d)\n", |
| 1081 | ctrl->pci_dev->irq, | 1072 | ctrl->pci_dev->irq, rc); |
| 1082 | atomic_read(&shpchp_num_controllers), rc); | ||
| 1083 | if (rc) { | 1073 | if (rc) { |
| 1084 | ctrl_err(ctrl, "Can't get irq %d for the hotplug " | 1074 | ctrl_err(ctrl, "Can't get irq %d for the hotplug " |
| 1085 | "controller\n", ctrl->pci_dev->irq); | 1075 | "controller\n", ctrl->pci_dev->irq); |
| @@ -1092,18 +1082,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
| 1092 | shpc_get_cur_bus_speed(ctrl); | 1082 | shpc_get_cur_bus_speed(ctrl); |
| 1093 | 1083 | ||
| 1094 | /* | 1084 | /* |
| 1095 | * If this is the first controller to be initialized, | ||
| 1096 | * initialize the shpchpd work queue | ||
| 1097 | */ | ||
| 1098 | if (atomic_add_return(1, &shpchp_num_controllers) == 1) { | ||
| 1099 | shpchp_wq = create_singlethread_workqueue("shpchpd"); | ||
| 1100 | if (!shpchp_wq) { | ||
| 1101 | rc = -ENOMEM; | ||
| 1102 | goto abort_iounmap; | ||
| 1103 | } | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | /* | ||
| 1107 | * Unmask all event interrupts of all slots | 1085 | * Unmask all event interrupts of all slots |
| 1108 | */ | 1086 | */ |
| 1109 | for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { | 1087 | for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { |
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 98abf8b91294..834842aa5bbf 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
| @@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | |||
| 57 | *msg = cfg->msg; | 57 | *msg = cfg->msg; |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | void mask_ht_irq(unsigned int irq) | 60 | void mask_ht_irq(struct irq_data *data) |
| 61 | { | 61 | { |
| 62 | struct ht_irq_cfg *cfg; | 62 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); |
| 63 | struct ht_irq_msg msg; | 63 | struct ht_irq_msg msg = cfg->msg; |
| 64 | |||
| 65 | cfg = get_irq_data(irq); | ||
| 66 | 64 | ||
| 67 | msg = cfg->msg; | ||
| 68 | msg.address_lo |= 1; | 65 | msg.address_lo |= 1; |
| 69 | write_ht_irq_msg(irq, &msg); | 66 | write_ht_irq_msg(data->irq, &msg); |
| 70 | } | 67 | } |
| 71 | 68 | ||
| 72 | void unmask_ht_irq(unsigned int irq) | 69 | void unmask_ht_irq(struct irq_data *data) |
| 73 | { | 70 | { |
| 74 | struct ht_irq_cfg *cfg; | 71 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); |
| 75 | struct ht_irq_msg msg; | 72 | struct ht_irq_msg msg = cfg->msg; |
| 76 | |||
| 77 | cfg = get_irq_data(irq); | ||
| 78 | 73 | ||
| 79 | msg = cfg->msg; | ||
| 80 | msg.address_lo &= ~1; | 74 | msg.address_lo &= ~1; |
| 81 | write_ht_irq_msg(irq, &msg); | 75 | write_ht_irq_msg(data->irq, &msg); |
| 82 | } | 76 | } |
| 83 | 77 | ||
| 84 | /** | 78 | /** |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index fd1d2867cdcc..ec87cd66f3eb 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
| @@ -46,109 +46,24 @@ static __init int setup_intremap(char *str) | |||
| 46 | } | 46 | } |
| 47 | early_param("intremap", setup_intremap); | 47 | early_param("intremap", setup_intremap); |
| 48 | 48 | ||
| 49 | struct irq_2_iommu { | ||
| 50 | struct intel_iommu *iommu; | ||
| 51 | u16 irte_index; | ||
| 52 | u16 sub_handle; | ||
| 53 | u8 irte_mask; | ||
| 54 | }; | ||
| 55 | |||
| 56 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
| 57 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) | ||
| 58 | { | ||
| 59 | struct irq_2_iommu *iommu; | ||
| 60 | |||
| 61 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | ||
| 62 | printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); | ||
| 63 | |||
| 64 | return iommu; | ||
| 65 | } | ||
| 66 | |||
| 67 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
| 68 | { | ||
| 69 | struct irq_desc *desc; | ||
| 70 | |||
| 71 | desc = irq_to_desc(irq); | ||
| 72 | |||
| 73 | if (WARN_ON_ONCE(!desc)) | ||
| 74 | return NULL; | ||
| 75 | |||
| 76 | return desc->irq_2_iommu; | ||
| 77 | } | ||
| 78 | |||
| 79 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
| 80 | { | ||
| 81 | struct irq_desc *desc; | ||
| 82 | struct irq_2_iommu *irq_iommu; | ||
| 83 | |||
| 84 | desc = irq_to_desc(irq); | ||
| 85 | if (!desc) { | ||
| 86 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
| 87 | return NULL; | ||
| 88 | } | ||
| 89 | |||
| 90 | irq_iommu = desc->irq_2_iommu; | ||
| 91 | |||
| 92 | if (!irq_iommu) | ||
| 93 | desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); | ||
| 94 | |||
| 95 | return desc->irq_2_iommu; | ||
| 96 | } | ||
| 97 | |||
| 98 | #else /* !CONFIG_SPARSE_IRQ */ | ||
| 99 | |||
| 100 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | ||
| 101 | |||
| 102 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
| 103 | { | ||
| 104 | if (irq < nr_irqs) | ||
| 105 | return &irq_2_iommuX[irq]; | ||
| 106 | |||
| 107 | return NULL; | ||
| 108 | } | ||
| 109 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
| 110 | { | ||
| 111 | return irq_2_iommu(irq); | ||
| 112 | } | ||
| 113 | #endif | ||
| 114 | |||
| 115 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 49 | static DEFINE_SPINLOCK(irq_2_ir_lock); |
| 116 | 50 | ||
| 117 | static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) | 51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
| 118 | { | ||
| 119 | struct irq_2_iommu *irq_iommu; | ||
| 120 | |||
| 121 | irq_iommu = irq_2_iommu(irq); | ||
| 122 | |||
| 123 | if (!irq_iommu) | ||
| 124 | return NULL; | ||
| 125 | |||
| 126 | if (!irq_iommu->iommu) | ||
| 127 | return NULL; | ||
| 128 | |||
| 129 | return irq_iommu; | ||
| 130 | } | ||
| 131 | |||
| 132 | int irq_remapped(int irq) | ||
| 133 | { | 52 | { |
| 134 | return valid_irq_2_iommu(irq) != NULL; | 53 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
| 54 | return cfg ? &cfg->irq_2_iommu : NULL; | ||
| 135 | } | 55 | } |
| 136 | 56 | ||
| 137 | int get_irte(int irq, struct irte *entry) | 57 | int get_irte(int irq, struct irte *entry) |
| 138 | { | 58 | { |
| 139 | int index; | 59 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 140 | struct irq_2_iommu *irq_iommu; | ||
| 141 | unsigned long flags; | 60 | unsigned long flags; |
| 61 | int index; | ||
| 142 | 62 | ||
| 143 | if (!entry) | 63 | if (!entry || !irq_iommu) |
| 144 | return -1; | 64 | return -1; |
| 145 | 65 | ||
| 146 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 66 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 147 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 148 | if (!irq_iommu) { | ||
| 149 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 150 | return -1; | ||
| 151 | } | ||
| 152 | 67 | ||
| 153 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 68 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
| 154 | *entry = *(irq_iommu->iommu->ir_table->base + index); | 69 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
| @@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry) | |||
| 160 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | 75 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) |
| 161 | { | 76 | { |
| 162 | struct ir_table *table = iommu->ir_table; | 77 | struct ir_table *table = iommu->ir_table; |
| 163 | struct irq_2_iommu *irq_iommu; | 78 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 164 | u16 index, start_index; | 79 | u16 index, start_index; |
| 165 | unsigned int mask = 0; | 80 | unsigned int mask = 0; |
| 166 | unsigned long flags; | 81 | unsigned long flags; |
| 167 | int i; | 82 | int i; |
| 168 | 83 | ||
| 169 | if (!count) | 84 | if (!count || !irq_iommu) |
| 170 | return -1; | ||
| 171 | |||
| 172 | #ifndef CONFIG_SPARSE_IRQ | ||
| 173 | /* protect irq_2_iommu_alloc later */ | ||
| 174 | if (irq >= nr_irqs) | ||
| 175 | return -1; | 85 | return -1; |
| 176 | #endif | ||
| 177 | 86 | ||
| 178 | /* | 87 | /* |
| 179 | * start the IRTE search from index 0. | 88 | * start the IRTE search from index 0. |
| @@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
| 214 | for (i = index; i < index + count; i++) | 123 | for (i = index; i < index + count; i++) |
| 215 | table->base[i].present = 1; | 124 | table->base[i].present = 1; |
| 216 | 125 | ||
| 217 | irq_iommu = irq_2_iommu_alloc(irq); | ||
| 218 | if (!irq_iommu) { | ||
| 219 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 220 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
| 221 | return -1; | ||
| 222 | } | ||
| 223 | |||
| 224 | irq_iommu->iommu = iommu; | 126 | irq_iommu->iommu = iommu; |
| 225 | irq_iommu->irte_index = index; | 127 | irq_iommu->irte_index = index; |
| 226 | irq_iommu->sub_handle = 0; | 128 | irq_iommu->sub_handle = 0; |
| @@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) | |||
| 244 | 146 | ||
| 245 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) | 147 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) |
| 246 | { | 148 | { |
| 247 | int index; | 149 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 248 | struct irq_2_iommu *irq_iommu; | ||
| 249 | unsigned long flags; | 150 | unsigned long flags; |
| 151 | int index; | ||
| 250 | 152 | ||
| 251 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 153 | if (!irq_iommu) |
| 252 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 253 | if (!irq_iommu) { | ||
| 254 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 255 | return -1; | 154 | return -1; |
| 256 | } | ||
| 257 | 155 | ||
| 156 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 258 | *sub_handle = irq_iommu->sub_handle; | 157 | *sub_handle = irq_iommu->sub_handle; |
| 259 | index = irq_iommu->irte_index; | 158 | index = irq_iommu->irte_index; |
| 260 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 159 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| @@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |||
| 263 | 162 | ||
| 264 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | 163 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
| 265 | { | 164 | { |
| 266 | struct irq_2_iommu *irq_iommu; | 165 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 267 | unsigned long flags; | 166 | unsigned long flags; |
| 268 | 167 | ||
| 269 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 168 | if (!irq_iommu) |
| 270 | |||
| 271 | irq_iommu = irq_2_iommu_alloc(irq); | ||
| 272 | |||
| 273 | if (!irq_iommu) { | ||
| 274 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 275 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
| 276 | return -1; | 169 | return -1; |
| 277 | } | 170 | |
| 171 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 278 | 172 | ||
| 279 | irq_iommu->iommu = iommu; | 173 | irq_iommu->iommu = iommu; |
| 280 | irq_iommu->irte_index = index; | 174 | irq_iommu->irte_index = index; |
| @@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
| 286 | return 0; | 180 | return 0; |
| 287 | } | 181 | } |
| 288 | 182 | ||
| 289 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | ||
| 290 | { | ||
| 291 | struct irq_2_iommu *irq_iommu; | ||
| 292 | unsigned long flags; | ||
| 293 | |||
| 294 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 295 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 296 | if (!irq_iommu) { | ||
| 297 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 298 | return -1; | ||
| 299 | } | ||
| 300 | |||
| 301 | irq_iommu->iommu = NULL; | ||
| 302 | irq_iommu->irte_index = 0; | ||
| 303 | irq_iommu->sub_handle = 0; | ||
| 304 | irq_2_iommu(irq)->irte_mask = 0; | ||
| 305 | |||
| 306 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 307 | |||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | int modify_irte(int irq, struct irte *irte_modified) | 183 | int modify_irte(int irq, struct irte *irte_modified) |
| 312 | { | 184 | { |
| 313 | int rc; | 185 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 314 | int index; | ||
| 315 | struct irte *irte; | ||
| 316 | struct intel_iommu *iommu; | 186 | struct intel_iommu *iommu; |
| 317 | struct irq_2_iommu *irq_iommu; | ||
| 318 | unsigned long flags; | 187 | unsigned long flags; |
| 188 | struct irte *irte; | ||
| 189 | int rc, index; | ||
| 319 | 190 | ||
| 320 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 191 | if (!irq_iommu) |
| 321 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 322 | if (!irq_iommu) { | ||
| 323 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 324 | return -1; | 192 | return -1; |
| 325 | } | 193 | |
| 194 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 326 | 195 | ||
| 327 | iommu = irq_iommu->iommu; | 196 | iommu = irq_iommu->iommu; |
| 328 | 197 | ||
| @@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
| 339 | return rc; | 208 | return rc; |
| 340 | } | 209 | } |
| 341 | 210 | ||
| 342 | int flush_irte(int irq) | ||
| 343 | { | ||
| 344 | int rc; | ||
| 345 | int index; | ||
| 346 | struct intel_iommu *iommu; | ||
| 347 | struct irq_2_iommu *irq_iommu; | ||
| 348 | unsigned long flags; | ||
| 349 | |||
| 350 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 351 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 352 | if (!irq_iommu) { | ||
| 353 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 354 | return -1; | ||
| 355 | } | ||
| 356 | |||
| 357 | iommu = irq_iommu->iommu; | ||
| 358 | |||
| 359 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
| 360 | |||
| 361 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
| 362 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 363 | |||
| 364 | return rc; | ||
| 365 | } | ||
| 366 | |||
| 367 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) | 211 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) |
| 368 | { | 212 | { |
| 369 | int i; | 213 | int i; |
| @@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) | |||
| 420 | 264 | ||
| 421 | int free_irte(int irq) | 265 | int free_irte(int irq) |
| 422 | { | 266 | { |
| 423 | int rc = 0; | 267 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 424 | struct irq_2_iommu *irq_iommu; | ||
| 425 | unsigned long flags; | 268 | unsigned long flags; |
| 269 | int rc; | ||
| 426 | 270 | ||
| 427 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 271 | if (!irq_iommu) |
| 428 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 429 | if (!irq_iommu) { | ||
| 430 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 431 | return -1; | 272 | return -1; |
| 432 | } | 273 | |
| 274 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 433 | 275 | ||
| 434 | rc = clear_entries(irq_iommu); | 276 | rc = clear_entries(irq_iommu); |
| 435 | 277 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 69b7be33b3a2..7c24dcef2989 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -35,7 +35,12 @@ int arch_msi_check_device(struct pci_dev *dev, int nvec, int type) | |||
| 35 | #endif | 35 | #endif |
| 36 | 36 | ||
| 37 | #ifndef arch_setup_msi_irqs | 37 | #ifndef arch_setup_msi_irqs |
| 38 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 38 | # define arch_setup_msi_irqs default_setup_msi_irqs |
| 39 | # define HAVE_DEFAULT_MSI_SETUP_IRQS | ||
| 40 | #endif | ||
| 41 | |||
| 42 | #ifdef HAVE_DEFAULT_MSI_SETUP_IRQS | ||
| 43 | int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
| 39 | { | 44 | { |
| 40 | struct msi_desc *entry; | 45 | struct msi_desc *entry; |
| 41 | int ret; | 46 | int ret; |
| @@ -60,7 +65,12 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
| 60 | #endif | 65 | #endif |
| 61 | 66 | ||
| 62 | #ifndef arch_teardown_msi_irqs | 67 | #ifndef arch_teardown_msi_irqs |
| 63 | void arch_teardown_msi_irqs(struct pci_dev *dev) | 68 | # define arch_teardown_msi_irqs default_teardown_msi_irqs |
| 69 | # define HAVE_DEFAULT_MSI_TEARDOWN_IRQS | ||
| 70 | #endif | ||
| 71 | |||
| 72 | #ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS | ||
| 73 | void default_teardown_msi_irqs(struct pci_dev *dev) | ||
| 64 | { | 74 | { |
| 65 | struct msi_desc *entry; | 75 | struct msi_desc *entry; |
| 66 | 76 | ||
| @@ -170,33 +180,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag) | |||
| 170 | desc->masked = __msix_mask_irq(desc, flag); | 180 | desc->masked = __msix_mask_irq(desc, flag); |
| 171 | } | 181 | } |
| 172 | 182 | ||
| 173 | static void msi_set_mask_bit(unsigned irq, u32 flag) | 183 | static void msi_set_mask_bit(struct irq_data *data, u32 flag) |
| 174 | { | 184 | { |
| 175 | struct msi_desc *desc = get_irq_msi(irq); | 185 | struct msi_desc *desc = irq_data_get_msi(data); |
| 176 | 186 | ||
| 177 | if (desc->msi_attrib.is_msix) { | 187 | if (desc->msi_attrib.is_msix) { |
| 178 | msix_mask_irq(desc, flag); | 188 | msix_mask_irq(desc, flag); |
| 179 | readl(desc->mask_base); /* Flush write to device */ | 189 | readl(desc->mask_base); /* Flush write to device */ |
| 180 | } else { | 190 | } else { |
| 181 | unsigned offset = irq - desc->dev->irq; | 191 | unsigned offset = data->irq - desc->dev->irq; |
| 182 | msi_mask_irq(desc, 1 << offset, flag << offset); | 192 | msi_mask_irq(desc, 1 << offset, flag << offset); |
| 183 | } | 193 | } |
| 184 | } | 194 | } |
| 185 | 195 | ||
| 186 | void mask_msi_irq(unsigned int irq) | 196 | void mask_msi_irq(struct irq_data *data) |
| 187 | { | 197 | { |
| 188 | msi_set_mask_bit(irq, 1); | 198 | msi_set_mask_bit(data, 1); |
| 189 | } | 199 | } |
| 190 | 200 | ||
| 191 | void unmask_msi_irq(unsigned int irq) | 201 | void unmask_msi_irq(struct irq_data *data) |
| 192 | { | 202 | { |
| 193 | msi_set_mask_bit(irq, 0); | 203 | msi_set_mask_bit(data, 0); |
| 194 | } | 204 | } |
| 195 | 205 | ||
| 196 | void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 206 | void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
| 197 | { | 207 | { |
| 198 | struct msi_desc *entry = get_irq_desc_msi(desc); | ||
| 199 | |||
| 200 | BUG_ON(entry->dev->current_state != PCI_D0); | 208 | BUG_ON(entry->dev->current_state != PCI_D0); |
| 201 | 209 | ||
| 202 | if (entry->msi_attrib.is_msix) { | 210 | if (entry->msi_attrib.is_msix) { |
| @@ -227,15 +235,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
| 227 | 235 | ||
| 228 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 236 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
| 229 | { | 237 | { |
| 230 | struct irq_desc *desc = irq_to_desc(irq); | 238 | struct msi_desc *entry = get_irq_msi(irq); |
| 231 | 239 | ||
| 232 | read_msi_msg_desc(desc, msg); | 240 | __read_msi_msg(entry, msg); |
| 233 | } | 241 | } |
| 234 | 242 | ||
| 235 | void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 243 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
| 236 | { | 244 | { |
| 237 | struct msi_desc *entry = get_irq_desc_msi(desc); | ||
| 238 | |||
| 239 | /* Assert that the cache is valid, assuming that | 245 | /* Assert that the cache is valid, assuming that |
| 240 | * valid messages are not all-zeroes. */ | 246 | * valid messages are not all-zeroes. */ |
| 241 | BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | | 247 | BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | |
| @@ -246,15 +252,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
| 246 | 252 | ||
| 247 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | 253 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) |
| 248 | { | 254 | { |
| 249 | struct irq_desc *desc = irq_to_desc(irq); | 255 | struct msi_desc *entry = get_irq_msi(irq); |
| 250 | 256 | ||
| 251 | get_cached_msi_msg_desc(desc, msg); | 257 | __get_cached_msi_msg(entry, msg); |
| 252 | } | 258 | } |
| 253 | 259 | ||
| 254 | void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 260 | void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
| 255 | { | 261 | { |
| 256 | struct msi_desc *entry = get_irq_desc_msi(desc); | ||
| 257 | |||
| 258 | if (entry->dev->current_state != PCI_D0) { | 262 | if (entry->dev->current_state != PCI_D0) { |
| 259 | /* Don't touch the hardware now */ | 263 | /* Don't touch the hardware now */ |
| 260 | } else if (entry->msi_attrib.is_msix) { | 264 | } else if (entry->msi_attrib.is_msix) { |
| @@ -292,9 +296,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
| 292 | 296 | ||
| 293 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 297 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
| 294 | { | 298 | { |
| 295 | struct irq_desc *desc = irq_to_desc(irq); | 299 | struct msi_desc *entry = get_irq_msi(irq); |
| 296 | 300 | ||
| 297 | write_msi_msg_desc(desc, msg); | 301 | __write_msi_msg(entry, msg); |
| 298 | } | 302 | } |
| 299 | 303 | ||
| 300 | static void free_msi_irqs(struct pci_dev *dev) | 304 | static void free_msi_irqs(struct pci_dev *dev) |
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h index de27c1cb5a2b..feff3bee6fe5 100644 --- a/drivers/pci/msi.h +++ b/drivers/pci/msi.h | |||
| @@ -22,8 +22,8 @@ | |||
| 22 | #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) | 22 | #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) |
| 23 | #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) | 23 | #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) |
| 24 | 24 | ||
| 25 | #define msix_table_offset_reg(base) (base + 0x04) | 25 | #define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE) |
| 26 | #define msix_pba_offset_reg(base) (base + 0x08) | 26 | #define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA) |
| 27 | #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) | 27 | #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) |
| 28 | #define multi_msix_capable(control) msix_table_size((control)) | 28 | #define multi_msix_capable(control) msix_table_size((control)) |
| 29 | 29 | ||
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index b5a7d9bfcb24..63d5042f2079 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b) | |||
| 705 | 705 | ||
| 706 | #ifdef HAVE_PCI_MMAP | 706 | #ifdef HAVE_PCI_MMAP |
| 707 | 707 | ||
| 708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) | 708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, |
| 709 | enum pci_mmap_api mmap_api) | ||
| 709 | { | 710 | { |
| 710 | unsigned long nr, start, size; | 711 | unsigned long nr, start, size, pci_start; |
| 711 | 712 | ||
| 713 | if (pci_resource_len(pdev, resno) == 0) | ||
| 714 | return 0; | ||
| 712 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 715 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
| 713 | start = vma->vm_pgoff; | 716 | start = vma->vm_pgoff; |
| 714 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; | 717 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; |
| 715 | if (start < size && size - start >= nr) | 718 | pci_start = (mmap_api == PCI_MMAP_PROCFS) ? |
| 719 | pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; | ||
| 720 | if (start >= pci_start && start < pci_start + size && | ||
| 721 | start + nr <= pci_start + size) | ||
| 716 | return 1; | 722 | return 1; |
| 717 | WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", | ||
| 718 | current->comm, start, start+nr, pci_name(pdev), resno, size); | ||
| 719 | return 0; | 723 | return 0; |
| 720 | } | 724 | } |
| 721 | 725 | ||
| @@ -745,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
| 745 | if (i >= PCI_ROM_RESOURCE) | 749 | if (i >= PCI_ROM_RESOURCE) |
| 746 | return -ENODEV; | 750 | return -ENODEV; |
| 747 | 751 | ||
| 748 | if (!pci_mmap_fits(pdev, i, vma)) | 752 | if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { |
| 753 | WARN(1, "process \"%s\" tried to map 0x%08lx bytes " | ||
| 754 | "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", | ||
| 755 | current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, | ||
| 756 | pci_name(pdev), i, | ||
| 757 | (u64)pci_resource_start(pdev, i), | ||
| 758 | (u64)pci_resource_len(pdev, i)); | ||
| 749 | return -EINVAL; | 759 | return -EINVAL; |
| 760 | } | ||
| 750 | 761 | ||
| 751 | /* pci_mmap_page_range() expects the same kind of entry as coming | 762 | /* pci_mmap_page_range() expects the same kind of entry as coming |
| 752 | * from /proc/bus/pci/ which is a "user visible" value. If this is | 763 | * from /proc/bus/pci/ which is a "user visible" value. If this is |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7fa3cbd742c5..710c8a29be0d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -38,6 +38,19 @@ EXPORT_SYMBOL(pci_pci_problems); | |||
| 38 | 38 | ||
| 39 | unsigned int pci_pm_d3_delay; | 39 | unsigned int pci_pm_d3_delay; |
| 40 | 40 | ||
| 41 | static void pci_pme_list_scan(struct work_struct *work); | ||
| 42 | |||
| 43 | static LIST_HEAD(pci_pme_list); | ||
| 44 | static DEFINE_MUTEX(pci_pme_list_mutex); | ||
| 45 | static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); | ||
| 46 | |||
| 47 | struct pci_pme_device { | ||
| 48 | struct list_head list; | ||
| 49 | struct pci_dev *dev; | ||
| 50 | }; | ||
| 51 | |||
| 52 | #define PME_TIMEOUT 1000 /* How long between PME checks */ | ||
| 53 | |||
| 41 | static void pci_dev_d3_sleep(struct pci_dev *dev) | 54 | static void pci_dev_d3_sleep(struct pci_dev *dev) |
| 42 | { | 55 | { |
| 43 | unsigned int delay = dev->d3_delay; | 56 | unsigned int delay = dev->d3_delay; |
| @@ -994,6 +1007,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev, | |||
| 994 | int err; | 1007 | int err; |
| 995 | int i, bars = 0; | 1008 | int i, bars = 0; |
| 996 | 1009 | ||
| 1010 | /* | ||
| 1011 | * Power state could be unknown at this point, either due to a fresh | ||
| 1012 | * boot or a device removal call. So get the current power state | ||
| 1013 | * so that things like MSI message writing will behave as expected | ||
| 1014 | * (e.g. if the device really is in D0 at enable time). | ||
| 1015 | */ | ||
| 1016 | if (dev->pm_cap) { | ||
| 1017 | u16 pmcsr; | ||
| 1018 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
| 1019 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | ||
| 1020 | } | ||
| 1021 | |||
| 997 | if (atomic_add_return(1, &dev->enable_cnt) > 1) | 1022 | if (atomic_add_return(1, &dev->enable_cnt) > 1) |
| 998 | return 0; /* already enabled */ | 1023 | return 0; /* already enabled */ |
| 999 | 1024 | ||
| @@ -1331,6 +1356,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) | |||
| 1331 | return !!(dev->pme_support & (1 << state)); | 1356 | return !!(dev->pme_support & (1 << state)); |
| 1332 | } | 1357 | } |
| 1333 | 1358 | ||
| 1359 | static void pci_pme_list_scan(struct work_struct *work) | ||
| 1360 | { | ||
| 1361 | struct pci_pme_device *pme_dev; | ||
| 1362 | |||
| 1363 | mutex_lock(&pci_pme_list_mutex); | ||
| 1364 | if (!list_empty(&pci_pme_list)) { | ||
| 1365 | list_for_each_entry(pme_dev, &pci_pme_list, list) | ||
| 1366 | pci_pme_wakeup(pme_dev->dev, NULL); | ||
| 1367 | schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); | ||
| 1368 | } | ||
| 1369 | mutex_unlock(&pci_pme_list_mutex); | ||
| 1370 | } | ||
| 1371 | |||
| 1372 | /** | ||
| 1373 | * pci_external_pme - is a device an external PCI PME source? | ||
| 1374 | * @dev: PCI device to check | ||
| 1375 | * | ||
| 1376 | */ | ||
| 1377 | |||
| 1378 | static bool pci_external_pme(struct pci_dev *dev) | ||
| 1379 | { | ||
| 1380 | if (pci_is_pcie(dev) || dev->bus->number == 0) | ||
| 1381 | return false; | ||
| 1382 | return true; | ||
| 1383 | } | ||
| 1384 | |||
| 1334 | /** | 1385 | /** |
| 1335 | * pci_pme_active - enable or disable PCI device's PME# function | 1386 | * pci_pme_active - enable or disable PCI device's PME# function |
| 1336 | * @dev: PCI device to handle. | 1387 | * @dev: PCI device to handle. |
| @@ -1354,6 +1405,44 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
| 1354 | 1405 | ||
| 1355 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); | 1406 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
| 1356 | 1407 | ||
| 1408 | /* PCI (as opposed to PCIe) PME requires that the device have | ||
| 1409 | its PME# line hooked up correctly. Not all hardware vendors | ||
| 1410 | do this, so the PME never gets delivered and the device | ||
| 1411 | remains asleep. The easiest way around this is to | ||
| 1412 | periodically walk the list of suspended devices and check | ||
| 1413 | whether any have their PME flag set. The assumption is that | ||
| 1414 | we'll wake up often enough anyway that this won't be a huge | ||
| 1415 | hit, and the power savings from the devices will still be a | ||
| 1416 | win. */ | ||
| 1417 | |||
| 1418 | if (pci_external_pme(dev)) { | ||
| 1419 | struct pci_pme_device *pme_dev; | ||
| 1420 | if (enable) { | ||
| 1421 | pme_dev = kmalloc(sizeof(struct pci_pme_device), | ||
| 1422 | GFP_KERNEL); | ||
| 1423 | if (!pme_dev) | ||
| 1424 | goto out; | ||
| 1425 | pme_dev->dev = dev; | ||
| 1426 | mutex_lock(&pci_pme_list_mutex); | ||
| 1427 | list_add(&pme_dev->list, &pci_pme_list); | ||
| 1428 | if (list_is_singular(&pci_pme_list)) | ||
| 1429 | schedule_delayed_work(&pci_pme_work, | ||
| 1430 | msecs_to_jiffies(PME_TIMEOUT)); | ||
| 1431 | mutex_unlock(&pci_pme_list_mutex); | ||
| 1432 | } else { | ||
| 1433 | mutex_lock(&pci_pme_list_mutex); | ||
| 1434 | list_for_each_entry(pme_dev, &pci_pme_list, list) { | ||
| 1435 | if (pme_dev->dev == dev) { | ||
| 1436 | list_del(&pme_dev->list); | ||
| 1437 | kfree(pme_dev); | ||
| 1438 | break; | ||
| 1439 | } | ||
| 1440 | } | ||
| 1441 | mutex_unlock(&pci_pme_list_mutex); | ||
| 1442 | } | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | out: | ||
| 1357 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", | 1446 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", |
| 1358 | enable ? "enabled" : "disabled"); | 1447 | enable ? "enabled" : "disabled"); |
| 1359 | } | 1448 | } |
| @@ -2689,7 +2778,7 @@ int pcie_get_readrq(struct pci_dev *dev) | |||
| 2689 | 2778 | ||
| 2690 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | 2779 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); |
| 2691 | if (!ret) | 2780 | if (!ret) |
| 2692 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); | 2781 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); |
| 2693 | 2782 | ||
| 2694 | return ret; | 2783 | return ret; |
| 2695 | } | 2784 | } |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6beb11b617a9..7d33f6673868 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev); | |||
| 22 | #endif | 22 | #endif |
| 23 | extern void pci_cleanup_rom(struct pci_dev *dev); | 23 | extern void pci_cleanup_rom(struct pci_dev *dev); |
| 24 | #ifdef HAVE_PCI_MMAP | 24 | #ifdef HAVE_PCI_MMAP |
| 25 | enum pci_mmap_api { | ||
| 26 | PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ | ||
| 27 | PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ | ||
| 28 | }; | ||
| 25 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | 29 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, |
| 26 | struct vm_area_struct *vma); | 30 | struct vm_area_struct *vmai, |
| 31 | enum pci_mmap_api mmap_api); | ||
| 27 | #endif | 32 | #endif |
| 28 | int pci_probe_reset_function(struct pci_dev *dev); | 33 | int pci_probe_reset_function(struct pci_dev *dev); |
| 29 | 34 | ||
| @@ -63,11 +68,8 @@ struct pci_platform_pm_ops { | |||
| 63 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); | 68 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); |
| 64 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); | 69 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); |
| 65 | extern void pci_disable_enabled_device(struct pci_dev *dev); | 70 | extern void pci_disable_enabled_device(struct pci_dev *dev); |
| 66 | extern bool pci_check_pme_status(struct pci_dev *dev); | ||
| 67 | extern int pci_finish_runtime_suspend(struct pci_dev *dev); | 71 | extern int pci_finish_runtime_suspend(struct pci_dev *dev); |
| 68 | extern void pci_wakeup_event(struct pci_dev *dev); | ||
| 69 | extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | 72 | extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); |
| 70 | extern void pci_pme_wakeup_bus(struct pci_bus *bus); | ||
| 71 | extern void pci_pm_init(struct pci_dev *dev); | 73 | extern void pci_pm_init(struct pci_dev *dev); |
| 72 | extern void platform_pci_wakeup_init(struct pci_dev *dev); | 74 | extern void platform_pci_wakeup_init(struct pci_dev *dev); |
| 73 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); | 75 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); |
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 909924692b8a..b3cf6223f63a 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
| @@ -472,6 +472,7 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, | |||
| 472 | static const struct file_operations aer_inject_fops = { | 472 | static const struct file_operations aer_inject_fops = { |
| 473 | .write = aer_inject_write, | 473 | .write = aer_inject_write, |
| 474 | .owner = THIS_MODULE, | 474 | .owner = THIS_MODULE, |
| 475 | .llseek = noop_llseek, | ||
| 475 | }; | 476 | }; |
| 476 | 477 | ||
| 477 | static struct miscdevice aer_inject_device = { | 478 | static struct miscdevice aer_inject_device = { |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index f409948e1a9b..2b2b6508efde 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
| @@ -416,7 +416,7 @@ static void aer_error_resume(struct pci_dev *dev) | |||
| 416 | */ | 416 | */ |
| 417 | static int __init aer_service_init(void) | 417 | static int __init aer_service_init(void) |
| 418 | { | 418 | { |
| 419 | if (!pci_aer_available()) | 419 | if (!pci_aer_available() || aer_acpi_firmware_first()) |
| 420 | return -ENXIO; | 420 | return -ENXIO; |
| 421 | return pcie_port_service_register(&aerdriver); | 421 | return pcie_port_service_register(&aerdriver); |
| 422 | } | 422 | } |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 80c11d131499..9656e3060412 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
| @@ -132,6 +132,7 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) | |||
| 132 | 132 | ||
| 133 | #ifdef CONFIG_ACPI_APEI | 133 | #ifdef CONFIG_ACPI_APEI |
| 134 | extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); | 134 | extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); |
| 135 | extern bool aer_acpi_firmware_first(void); | ||
| 135 | #else | 136 | #else |
| 136 | static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) | 137 | static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) |
| 137 | { | 138 | { |
| @@ -139,6 +140,8 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) | |||
| 139 | return pci_dev->__aer_firmware_first; | 140 | return pci_dev->__aer_firmware_first; |
| 140 | return 0; | 141 | return 0; |
| 141 | } | 142 | } |
| 143 | |||
| 144 | static inline bool aer_acpi_firmware_first(void) { return false; } | ||
| 142 | #endif | 145 | #endif |
| 143 | 146 | ||
| 144 | static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, | 147 | static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 2bb9b8972211..275bf158ffa7 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
| @@ -93,4 +93,38 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev) | |||
| 93 | aer_set_firmware_first(dev); | 93 | aer_set_firmware_first(dev); |
| 94 | return dev->__aer_firmware_first; | 94 | return dev->__aer_firmware_first; |
| 95 | } | 95 | } |
| 96 | |||
| 97 | static bool aer_firmware_first; | ||
| 98 | |||
| 99 | static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data) | ||
| 100 | { | ||
| 101 | struct acpi_hest_aer_common *p; | ||
| 102 | |||
| 103 | if (aer_firmware_first) | ||
| 104 | return 0; | ||
| 105 | |||
| 106 | switch (hest_hdr->type) { | ||
| 107 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
| 108 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
| 109 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
| 110 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | ||
| 111 | aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
| 112 | default: | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | /** | ||
| 118 | * aer_acpi_firmware_first - Check if APEI should control AER. | ||
| 119 | */ | ||
| 120 | bool aer_acpi_firmware_first(void) | ||
| 121 | { | ||
| 122 | static bool parsed = false; | ||
| 123 | |||
| 124 | if (!parsed) { | ||
| 125 | apei_hest_parse(aer_hest_parse_aff, NULL); | ||
| 126 | parsed = true; | ||
| 127 | } | ||
| 128 | return aer_firmware_first; | ||
| 129 | } | ||
| 96 | #endif | 130 | #endif |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 29e268fadf14..43421fbe080a 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
| @@ -754,7 +754,7 @@ void aer_isr(struct work_struct *work) | |||
| 754 | { | 754 | { |
| 755 | struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); | 755 | struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); |
| 756 | struct pcie_device *p_device = rpc->rpd; | 756 | struct pcie_device *p_device = rpc->rpd; |
| 757 | struct aer_err_source e_src; | 757 | struct aer_err_source uninitialized_var(e_src); |
| 758 | 758 | ||
| 759 | mutex_lock(&rpc->rpc_mutex); | 759 | mutex_lock(&rpc->rpc_mutex); |
| 760 | while (get_e_source(rpc, &e_src)) | 760 | while (get_e_source(rpc, &e_src)) |
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c index b7c4cb1ccb23..5982b6a63b89 100644 --- a/drivers/pci/pcie/portdrv_acpi.c +++ b/drivers/pci/pcie/portdrv_acpi.c | |||
| @@ -49,7 +49,7 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) | |||
| 49 | | OSC_PCI_EXPRESS_PME_CONTROL; | 49 | | OSC_PCI_EXPRESS_PME_CONTROL; |
| 50 | 50 | ||
| 51 | if (pci_aer_available()) { | 51 | if (pci_aer_available()) { |
| 52 | if (pcie_aer_get_firmware_first(port)) | 52 | if (aer_acpi_firmware_first()) |
| 53 | dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n"); | 53 | dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n"); |
| 54 | else | 54 | else |
| 55 | flags |= OSC_PCI_EXPRESS_AER_CONTROL; | 55 | flags |= OSC_PCI_EXPRESS_AER_CONTROL; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 12625d90f8b5..c84900da3c59 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -961,8 +961,8 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 961 | dev->class = class; | 961 | dev->class = class; |
| 962 | class >>= 8; | 962 | class >>= 8; |
| 963 | 963 | ||
| 964 | dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", | 964 | dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n", |
| 965 | dev->vendor, dev->device, class, dev->hdr_type); | 965 | dev->vendor, dev->device, dev->hdr_type, class); |
| 966 | 966 | ||
| 967 | /* need to have dev->class ready */ | 967 | /* need to have dev->class ready */ |
| 968 | dev->cfg_size = pci_cfg_space_size(dev); | 968 | dev->cfg_size = pci_cfg_space_size(dev); |
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 01f0306525a5..27911b55c2a5 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
| 12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
| 13 | #include <linux/smp_lock.h> | ||
| 14 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
| 15 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
| 16 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
| @@ -212,8 +211,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
| 212 | #endif /* HAVE_PCI_MMAP */ | 211 | #endif /* HAVE_PCI_MMAP */ |
| 213 | int ret = 0; | 212 | int ret = 0; |
| 214 | 213 | ||
| 215 | lock_kernel(); | ||
| 216 | |||
| 217 | switch (cmd) { | 214 | switch (cmd) { |
| 218 | case PCIIOC_CONTROLLER: | 215 | case PCIIOC_CONTROLLER: |
| 219 | ret = pci_domain_nr(dev->bus); | 216 | ret = pci_domain_nr(dev->bus); |
| @@ -242,7 +239,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
| 242 | break; | 239 | break; |
| 243 | }; | 240 | }; |
| 244 | 241 | ||
| 245 | unlock_kernel(); | ||
| 246 | return ret; | 242 | return ret; |
| 247 | } | 243 | } |
| 248 | 244 | ||
| @@ -260,7 +256,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 260 | 256 | ||
| 261 | /* Make sure the caller is mapping a real resource for this device */ | 257 | /* Make sure the caller is mapping a real resource for this device */ |
| 262 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 258 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
| 263 | if (pci_mmap_fits(dev, i, vma)) | 259 | if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) |
| 264 | break; | 260 | break; |
| 265 | } | 261 | } |
| 266 | 262 | ||
| @@ -306,6 +302,7 @@ static const struct file_operations proc_bus_pci_operations = { | |||
| 306 | .read = proc_bus_pci_read, | 302 | .read = proc_bus_pci_read, |
| 307 | .write = proc_bus_pci_write, | 303 | .write = proc_bus_pci_write, |
| 308 | .unlocked_ioctl = proc_bus_pci_ioctl, | 304 | .unlocked_ioctl = proc_bus_pci_ioctl, |
| 305 | .compat_ioctl = proc_bus_pci_ioctl, | ||
| 309 | #ifdef HAVE_PCI_MMAP | 306 | #ifdef HAVE_PCI_MMAP |
| 310 | .open = proc_bus_pci_open, | 307 | .open = proc_bus_pci_open, |
| 311 | .release = proc_bus_pci_release, | 308 | .release = proc_bus_pci_release, |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 857ae01734a6..f5c63fe9db5c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -226,6 +226,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir | |||
| 226 | * VIA Apollo KT133 needs PCI latency patch | 226 | * VIA Apollo KT133 needs PCI latency patch |
| 227 | * Made according to a windows driver based patch by George E. Breese | 227 | * Made according to a windows driver based patch by George E. Breese |
| 228 | * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm | 228 | * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm |
| 229 | * and http://www.georgebreese.com/net/software/#PCI | ||
| 229 | * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for | 230 | * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for |
| 230 | * the info on which Mr Breese based his work. | 231 | * the info on which Mr Breese based his work. |
| 231 | * | 232 | * |
| @@ -1016,7 +1017,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge) | |||
| 1016 | /* | 1017 | /* |
| 1017 | * Common misconfiguration of the MediaGX/Geode PCI master that will | 1018 | * Common misconfiguration of the MediaGX/Geode PCI master that will |
| 1018 | * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 | 1019 | * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 |
| 1019 | * datasheets found at http://www.national.com/ds/GX for info on what | 1020 | * datasheets found at http://www.national.com/analog for info on what |
| 1020 | * these bits do. <christer@weinigel.se> | 1021 | * these bits do. <christer@weinigel.se> |
| 1021 | */ | 1022 | */ |
| 1022 | static void quirk_mediagx_master(struct pci_dev *dev) | 1023 | static void quirk_mediagx_master(struct pci_dev *dev) |
| @@ -2296,6 +2297,37 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, | |||
| 2296 | PCI_DEVICE_ID_NVIDIA_NVENET_15, | 2297 | PCI_DEVICE_ID_NVIDIA_NVENET_15, |
| 2297 | nvenet_msi_disable); | 2298 | nvenet_msi_disable); |
| 2298 | 2299 | ||
| 2300 | /* | ||
| 2301 | * Some versions of the MCP55 bridge from nvidia have a legacy irq routing | ||
| 2302 | * config register. This register controls the routing of legacy interrupts | ||
| 2303 | * from devices that route through the MCP55. If this register is misprogramed | ||
| 2304 | * interrupts are only sent to the bsp, unlike conventional systems where the | ||
| 2305 | * irq is broadxast to all online cpus. Not having this register set | ||
| 2306 | * properly prevents kdump from booting up properly, so lets make sure that | ||
| 2307 | * we have it set correctly. | ||
| 2308 | * Note this is an undocumented register. | ||
| 2309 | */ | ||
| 2310 | static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev) | ||
| 2311 | { | ||
| 2312 | u32 cfg; | ||
| 2313 | |||
| 2314 | pci_read_config_dword(dev, 0x74, &cfg); | ||
| 2315 | |||
| 2316 | if (cfg & ((1 << 2) | (1 << 15))) { | ||
| 2317 | printk(KERN_INFO "Rewriting irq routing register on MCP55\n"); | ||
| 2318 | cfg &= ~((1 << 2) | (1 << 15)); | ||
| 2319 | pci_write_config_dword(dev, 0x74, cfg); | ||
| 2320 | } | ||
| 2321 | } | ||
| 2322 | |||
| 2323 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, | ||
| 2324 | PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0, | ||
| 2325 | nvbridge_check_legacy_irq_routing); | ||
| 2326 | |||
| 2327 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, | ||
| 2328 | PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4, | ||
| 2329 | nvbridge_check_legacy_irq_routing); | ||
| 2330 | |||
| 2299 | static int __devinit ht_check_msi_mapping(struct pci_dev *dev) | 2331 | static int __devinit ht_check_msi_mapping(struct pci_dev *dev) |
| 2300 | { | 2332 | { |
| 2301 | int pos, ttl = 48; | 2333 | int pos, ttl = 48; |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 2aaa13150de3..bc0e6eea0fff 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
| @@ -85,7 +85,7 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
| 85 | } | 85 | } |
| 86 | } | 86 | } |
| 87 | res->flags &= ~IORESOURCE_UNSET; | 87 | res->flags &= ~IORESOURCE_UNSET; |
| 88 | dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n", | 88 | dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n", |
| 89 | resno, res, (unsigned long long)region.start, | 89 | resno, res, (unsigned long long)region.start, |
| 90 | (unsigned long long)region.end); | 90 | (unsigned long long)region.end); |
| 91 | } | 91 | } |
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c new file mode 100644 index 000000000000..3a5a6fcc0ead --- /dev/null +++ b/drivers/pci/xen-pcifront.c | |||
| @@ -0,0 +1,1148 @@ | |||
| 1 | /* | ||
| 2 | * Xen PCI Frontend. | ||
| 3 | * | ||
| 4 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | ||
| 5 | */ | ||
| 6 | #include <linux/module.h> | ||
| 7 | #include <linux/init.h> | ||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <xen/xenbus.h> | ||
| 10 | #include <xen/events.h> | ||
| 11 | #include <xen/grant_table.h> | ||
| 12 | #include <xen/page.h> | ||
| 13 | #include <linux/spinlock.h> | ||
| 14 | #include <linux/pci.h> | ||
| 15 | #include <linux/msi.h> | ||
| 16 | #include <xen/interface/io/pciif.h> | ||
| 17 | #include <asm/xen/pci.h> | ||
| 18 | #include <linux/interrupt.h> | ||
| 19 | #include <asm/atomic.h> | ||
| 20 | #include <linux/workqueue.h> | ||
| 21 | #include <linux/bitops.h> | ||
| 22 | #include <linux/time.h> | ||
| 23 | |||
| 24 | #define INVALID_GRANT_REF (0) | ||
| 25 | #define INVALID_EVTCHN (-1) | ||
| 26 | |||
| 27 | struct pci_bus_entry { | ||
| 28 | struct list_head list; | ||
| 29 | struct pci_bus *bus; | ||
| 30 | }; | ||
| 31 | |||
| 32 | #define _PDEVB_op_active (0) | ||
| 33 | #define PDEVB_op_active (1 << (_PDEVB_op_active)) | ||
| 34 | |||
| 35 | struct pcifront_device { | ||
| 36 | struct xenbus_device *xdev; | ||
| 37 | struct list_head root_buses; | ||
| 38 | |||
| 39 | int evtchn; | ||
| 40 | int gnt_ref; | ||
| 41 | |||
| 42 | int irq; | ||
| 43 | |||
| 44 | /* Lock this when doing any operations in sh_info */ | ||
| 45 | spinlock_t sh_info_lock; | ||
| 46 | struct xen_pci_sharedinfo *sh_info; | ||
| 47 | struct work_struct op_work; | ||
| 48 | unsigned long flags; | ||
| 49 | |||
| 50 | }; | ||
| 51 | |||
| 52 | struct pcifront_sd { | ||
| 53 | int domain; | ||
| 54 | struct pcifront_device *pdev; | ||
| 55 | }; | ||
| 56 | |||
| 57 | static inline struct pcifront_device * | ||
| 58 | pcifront_get_pdev(struct pcifront_sd *sd) | ||
| 59 | { | ||
| 60 | return sd->pdev; | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline void pcifront_init_sd(struct pcifront_sd *sd, | ||
| 64 | unsigned int domain, unsigned int bus, | ||
| 65 | struct pcifront_device *pdev) | ||
| 66 | { | ||
| 67 | sd->domain = domain; | ||
| 68 | sd->pdev = pdev; | ||
| 69 | } | ||
| 70 | |||
| 71 | static DEFINE_SPINLOCK(pcifront_dev_lock); | ||
| 72 | static struct pcifront_device *pcifront_dev; | ||
| 73 | |||
| 74 | static int verbose_request; | ||
| 75 | module_param(verbose_request, int, 0644); | ||
| 76 | |||
| 77 | static int errno_to_pcibios_err(int errno) | ||
| 78 | { | ||
| 79 | switch (errno) { | ||
| 80 | case XEN_PCI_ERR_success: | ||
| 81 | return PCIBIOS_SUCCESSFUL; | ||
| 82 | |||
| 83 | case XEN_PCI_ERR_dev_not_found: | ||
| 84 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 85 | |||
| 86 | case XEN_PCI_ERR_invalid_offset: | ||
| 87 | case XEN_PCI_ERR_op_failed: | ||
| 88 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 89 | |||
| 90 | case XEN_PCI_ERR_not_implemented: | ||
| 91 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
| 92 | |||
| 93 | case XEN_PCI_ERR_access_denied: | ||
| 94 | return PCIBIOS_SET_FAILED; | ||
| 95 | } | ||
| 96 | return errno; | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline void schedule_pcifront_aer_op(struct pcifront_device *pdev) | ||
| 100 | { | ||
| 101 | if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags) | ||
| 102 | && !test_and_set_bit(_PDEVB_op_active, &pdev->flags)) { | ||
| 103 | dev_dbg(&pdev->xdev->dev, "schedule aer frontend job\n"); | ||
| 104 | schedule_work(&pdev->op_work); | ||
| 105 | } | ||
| 106 | } | ||
| 107 | |||
| 108 | static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op) | ||
| 109 | { | ||
| 110 | int err = 0; | ||
| 111 | struct xen_pci_op *active_op = &pdev->sh_info->op; | ||
| 112 | unsigned long irq_flags; | ||
| 113 | evtchn_port_t port = pdev->evtchn; | ||
| 114 | unsigned irq = pdev->irq; | ||
| 115 | s64 ns, ns_timeout; | ||
| 116 | struct timeval tv; | ||
| 117 | |||
| 118 | spin_lock_irqsave(&pdev->sh_info_lock, irq_flags); | ||
| 119 | |||
| 120 | memcpy(active_op, op, sizeof(struct xen_pci_op)); | ||
| 121 | |||
| 122 | /* Go */ | ||
| 123 | wmb(); | ||
| 124 | set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); | ||
| 125 | notify_remote_via_evtchn(port); | ||
| 126 | |||
| 127 | /* | ||
| 128 | * We set a poll timeout of 3 seconds but give up on return after | ||
| 129 | * 2 seconds. It is better to time out too late rather than too early | ||
| 130 | * (in the latter case we end up continually re-executing poll() with a | ||
| 131 | * timeout in the past). 1s difference gives plenty of slack for error. | ||
| 132 | */ | ||
| 133 | do_gettimeofday(&tv); | ||
| 134 | ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC; | ||
| 135 | |||
| 136 | xen_clear_irq_pending(irq); | ||
| 137 | |||
| 138 | while (test_bit(_XEN_PCIF_active, | ||
| 139 | (unsigned long *)&pdev->sh_info->flags)) { | ||
| 140 | xen_poll_irq_timeout(irq, jiffies + 3*HZ); | ||
| 141 | xen_clear_irq_pending(irq); | ||
| 142 | do_gettimeofday(&tv); | ||
| 143 | ns = timeval_to_ns(&tv); | ||
| 144 | if (ns > ns_timeout) { | ||
| 145 | dev_err(&pdev->xdev->dev, | ||
| 146 | "pciback not responding!!!\n"); | ||
| 147 | clear_bit(_XEN_PCIF_active, | ||
| 148 | (unsigned long *)&pdev->sh_info->flags); | ||
| 149 | err = XEN_PCI_ERR_dev_not_found; | ||
| 150 | goto out; | ||
| 151 | } | ||
| 152 | } | ||
| 153 | |||
| 154 | /* | ||
| 155 | * We might lose backend service request since we | ||
| 156 | * reuse same evtchn with pci_conf backend response. So re-schedule | ||
| 157 | * aer pcifront service. | ||
| 158 | */ | ||
| 159 | if (test_bit(_XEN_PCIB_active, | ||
| 160 | (unsigned long *)&pdev->sh_info->flags)) { | ||
| 161 | dev_err(&pdev->xdev->dev, | ||
| 162 | "schedule aer pcifront service\n"); | ||
| 163 | schedule_pcifront_aer_op(pdev); | ||
| 164 | } | ||
| 165 | |||
| 166 | memcpy(op, active_op, sizeof(struct xen_pci_op)); | ||
| 167 | |||
| 168 | err = op->err; | ||
| 169 | out: | ||
| 170 | spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags); | ||
| 171 | return err; | ||
| 172 | } | ||
| 173 | |||
| 174 | /* Access to this function is spinlocked in drivers/pci/access.c */ | ||
| 175 | static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn, | ||
| 176 | int where, int size, u32 *val) | ||
| 177 | { | ||
| 178 | int err = 0; | ||
| 179 | struct xen_pci_op op = { | ||
| 180 | .cmd = XEN_PCI_OP_conf_read, | ||
| 181 | .domain = pci_domain_nr(bus), | ||
| 182 | .bus = bus->number, | ||
| 183 | .devfn = devfn, | ||
| 184 | .offset = where, | ||
| 185 | .size = size, | ||
| 186 | }; | ||
| 187 | struct pcifront_sd *sd = bus->sysdata; | ||
| 188 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
| 189 | |||
| 190 | if (verbose_request) | ||
| 191 | dev_info(&pdev->xdev->dev, | ||
| 192 | "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n", | ||
| 193 | pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), | ||
| 194 | PCI_FUNC(devfn), where, size); | ||
| 195 | |||
| 196 | err = do_pci_op(pdev, &op); | ||
| 197 | |||
| 198 | if (likely(!err)) { | ||
| 199 | if (verbose_request) | ||
| 200 | dev_info(&pdev->xdev->dev, "read got back value %x\n", | ||
| 201 | op.value); | ||
| 202 | |||
| 203 | *val = op.value; | ||
| 204 | } else if (err == -ENODEV) { | ||
| 205 | /* No device here, pretend that it just returned 0 */ | ||
| 206 | err = 0; | ||
| 207 | *val = 0; | ||
| 208 | } | ||
| 209 | |||
| 210 | return errno_to_pcibios_err(err); | ||
| 211 | } | ||
| 212 | |||
| 213 | /* Access to this function is spinlocked in drivers/pci/access.c */ | ||
| 214 | static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn, | ||
| 215 | int where, int size, u32 val) | ||
| 216 | { | ||
| 217 | struct xen_pci_op op = { | ||
| 218 | .cmd = XEN_PCI_OP_conf_write, | ||
| 219 | .domain = pci_domain_nr(bus), | ||
| 220 | .bus = bus->number, | ||
| 221 | .devfn = devfn, | ||
| 222 | .offset = where, | ||
| 223 | .size = size, | ||
| 224 | .value = val, | ||
| 225 | }; | ||
| 226 | struct pcifront_sd *sd = bus->sysdata; | ||
| 227 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
| 228 | |||
| 229 | if (verbose_request) | ||
| 230 | dev_info(&pdev->xdev->dev, | ||
| 231 | "write dev=%04x:%02x:%02x.%01x - " | ||
| 232 | "offset %x size %d val %x\n", | ||
| 233 | pci_domain_nr(bus), bus->number, | ||
| 234 | PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); | ||
| 235 | |||
| 236 | return errno_to_pcibios_err(do_pci_op(pdev, &op)); | ||
| 237 | } | ||
| 238 | |||
| 239 | struct pci_ops pcifront_bus_ops = { | ||
| 240 | .read = pcifront_bus_read, | ||
| 241 | .write = pcifront_bus_write, | ||
| 242 | }; | ||
| 243 | |||
| 244 | #ifdef CONFIG_PCI_MSI | ||
| 245 | static int pci_frontend_enable_msix(struct pci_dev *dev, | ||
| 246 | int **vector, int nvec) | ||
| 247 | { | ||
| 248 | int err; | ||
| 249 | int i; | ||
| 250 | struct xen_pci_op op = { | ||
| 251 | .cmd = XEN_PCI_OP_enable_msix, | ||
| 252 | .domain = pci_domain_nr(dev->bus), | ||
| 253 | .bus = dev->bus->number, | ||
| 254 | .devfn = dev->devfn, | ||
| 255 | .value = nvec, | ||
| 256 | }; | ||
| 257 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
| 258 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
| 259 | struct msi_desc *entry; | ||
| 260 | |||
| 261 | if (nvec > SH_INFO_MAX_VEC) { | ||
| 262 | dev_err(&dev->dev, "too much vector for pci frontend: %x." | ||
| 263 | " Increase SH_INFO_MAX_VEC.\n", nvec); | ||
| 264 | return -EINVAL; | ||
| 265 | } | ||
| 266 | |||
| 267 | i = 0; | ||
| 268 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
| 269 | op.msix_entries[i].entry = entry->msi_attrib.entry_nr; | ||
| 270 | /* Vector is useless at this point. */ | ||
| 271 | op.msix_entries[i].vector = -1; | ||
| 272 | i++; | ||
| 273 | } | ||
| 274 | |||
| 275 | err = do_pci_op(pdev, &op); | ||
| 276 | |||
| 277 | if (likely(!err)) { | ||
| 278 | if (likely(!op.value)) { | ||
| 279 | /* we get the result */ | ||
| 280 | for (i = 0; i < nvec; i++) | ||
| 281 | *(*vector+i) = op.msix_entries[i].vector; | ||
| 282 | return 0; | ||
| 283 | } else { | ||
| 284 | printk(KERN_DEBUG "enable msix get value %x\n", | ||
| 285 | op.value); | ||
| 286 | return op.value; | ||
| 287 | } | ||
| 288 | } else { | ||
| 289 | dev_err(&dev->dev, "enable msix get err %x\n", err); | ||
| 290 | return err; | ||
| 291 | } | ||
| 292 | } | ||
| 293 | |||
| 294 | static void pci_frontend_disable_msix(struct pci_dev *dev) | ||
| 295 | { | ||
| 296 | int err; | ||
| 297 | struct xen_pci_op op = { | ||
| 298 | .cmd = XEN_PCI_OP_disable_msix, | ||
| 299 | .domain = pci_domain_nr(dev->bus), | ||
| 300 | .bus = dev->bus->number, | ||
| 301 | .devfn = dev->devfn, | ||
| 302 | }; | ||
| 303 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
| 304 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
| 305 | |||
| 306 | err = do_pci_op(pdev, &op); | ||
| 307 | |||
| 308 | /* What should do for error ? */ | ||
| 309 | if (err) | ||
| 310 | dev_err(&dev->dev, "pci_disable_msix get err %x\n", err); | ||
| 311 | } | ||
| 312 | |||
| 313 | static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector) | ||
| 314 | { | ||
| 315 | int err; | ||
| 316 | struct xen_pci_op op = { | ||
| 317 | .cmd = XEN_PCI_OP_enable_msi, | ||
| 318 | .domain = pci_domain_nr(dev->bus), | ||
| 319 | .bus = dev->bus->number, | ||
| 320 | .devfn = dev->devfn, | ||
| 321 | }; | ||
| 322 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
| 323 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
| 324 | |||
| 325 | err = do_pci_op(pdev, &op); | ||
| 326 | if (likely(!err)) { | ||
| 327 | *(*vector) = op.value; | ||
| 328 | } else { | ||
| 329 | dev_err(&dev->dev, "pci frontend enable msi failed for dev " | ||
| 330 | "%x:%x\n", op.bus, op.devfn); | ||
| 331 | err = -EINVAL; | ||
| 332 | } | ||
| 333 | return err; | ||
| 334 | } | ||
| 335 | |||
| 336 | static void pci_frontend_disable_msi(struct pci_dev *dev) | ||
| 337 | { | ||
| 338 | int err; | ||
| 339 | struct xen_pci_op op = { | ||
| 340 | .cmd = XEN_PCI_OP_disable_msi, | ||
| 341 | .domain = pci_domain_nr(dev->bus), | ||
| 342 | .bus = dev->bus->number, | ||
| 343 | .devfn = dev->devfn, | ||
| 344 | }; | ||
| 345 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
| 346 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
| 347 | |||
| 348 | err = do_pci_op(pdev, &op); | ||
| 349 | if (err == XEN_PCI_ERR_dev_not_found) { | ||
| 350 | /* XXX No response from backend, what shall we do? */ | ||
| 351 | printk(KERN_DEBUG "get no response from backend for disable MSI\n"); | ||
| 352 | return; | ||
| 353 | } | ||
| 354 | if (err) | ||
| 355 | /* how can pciback notify us fail? */ | ||
| 356 | printk(KERN_DEBUG "get fake response frombackend\n"); | ||
| 357 | } | ||
| 358 | |||
| 359 | static struct xen_pci_frontend_ops pci_frontend_ops = { | ||
| 360 | .enable_msi = pci_frontend_enable_msi, | ||
| 361 | .disable_msi = pci_frontend_disable_msi, | ||
| 362 | .enable_msix = pci_frontend_enable_msix, | ||
| 363 | .disable_msix = pci_frontend_disable_msix, | ||
| 364 | }; | ||
| 365 | |||
| 366 | static void pci_frontend_registrar(int enable) | ||
| 367 | { | ||
| 368 | if (enable) | ||
| 369 | xen_pci_frontend = &pci_frontend_ops; | ||
| 370 | else | ||
| 371 | xen_pci_frontend = NULL; | ||
| 372 | }; | ||
| 373 | #else | ||
| 374 | static inline void pci_frontend_registrar(int enable) { }; | ||
| 375 | #endif /* CONFIG_PCI_MSI */ | ||
| 376 | |||
| 377 | /* Claim resources for the PCI frontend as-is, backend won't allow changes */ | ||
| 378 | static int pcifront_claim_resource(struct pci_dev *dev, void *data) | ||
| 379 | { | ||
| 380 | struct pcifront_device *pdev = data; | ||
| 381 | int i; | ||
| 382 | struct resource *r; | ||
| 383 | |||
| 384 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
| 385 | r = &dev->resource[i]; | ||
| 386 | |||
| 387 | if (!r->parent && r->start && r->flags) { | ||
| 388 | dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n", | ||
| 389 | pci_name(dev), i); | ||
| 390 | if (pci_claim_resource(dev, i)) { | ||
| 391 | dev_err(&pdev->xdev->dev, "Could not claim " | ||
| 392 | "resource %s/%d! Device offline. Try " | ||
| 393 | "giving less than 4GB to domain.\n", | ||
| 394 | pci_name(dev), i); | ||
| 395 | } | ||
| 396 | } | ||
| 397 | } | ||
| 398 | |||
| 399 | return 0; | ||
| 400 | } | ||
| 401 | |||
| 402 | static int __devinit pcifront_scan_bus(struct pcifront_device *pdev, | ||
| 403 | unsigned int domain, unsigned int bus, | ||
| 404 | struct pci_bus *b) | ||
| 405 | { | ||
| 406 | struct pci_dev *d; | ||
| 407 | unsigned int devfn; | ||
| 408 | |||
| 409 | /* Scan the bus for functions and add. | ||
| 410 | * We omit handling of PCI bridge attachment because pciback prevents | ||
| 411 | * bridges from being exported. | ||
| 412 | */ | ||
| 413 | for (devfn = 0; devfn < 0x100; devfn++) { | ||
| 414 | d = pci_get_slot(b, devfn); | ||
| 415 | if (d) { | ||
| 416 | /* Device is already known. */ | ||
| 417 | pci_dev_put(d); | ||
| 418 | continue; | ||
| 419 | } | ||
| 420 | |||
| 421 | d = pci_scan_single_device(b, devfn); | ||
| 422 | if (d) | ||
| 423 | dev_info(&pdev->xdev->dev, "New device on " | ||
| 424 | "%04x:%02x:%02x.%02x found.\n", domain, bus, | ||
| 425 | PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
| 426 | } | ||
| 427 | |||
| 428 | return 0; | ||
| 429 | } | ||
| 430 | |||
/* Create and scan a new PCI root bus (domain:bus) served by the backend.
 *
 * Allocates the per-bus sysdata and a bookkeeping entry, creates the bus
 * via pci_scan_bus_parented(), enumerates every devfn, claims resources
 * and registers the devices with the driver core ("going live").
 * Returns 0 on success or a negative errno.
 */
static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
					unsigned int domain, unsigned int bus)
{
	struct pci_bus *b;
	struct pcifront_sd *sd = NULL;
	struct pci_bus_entry *bus_entry = NULL;
	int err = 0;

#ifndef CONFIG_PCI_DOMAINS
	/* Without PCI domain support only domain 0 can be represented. */
	if (domain != 0) {
		dev_err(&pdev->xdev->dev,
			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
		dev_err(&pdev->xdev->dev,
			"Please compile with CONFIG_PCI_DOMAINS\n");
		err = -EINVAL;
		goto err_out;
	}
#endif

	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
		 domain, bus);

	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
	if (!bus_entry || !sd) {
		err = -ENOMEM;
		goto err_out;
	}
	pcifront_init_sd(sd, domain, bus, pdev);

	b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
				  &pcifront_bus_ops, sd);
	if (!b) {
		dev_err(&pdev->xdev->dev,
			"Error creating PCI Frontend Bus!\n");
		err = -ENOMEM;
		goto err_out;
	}

	bus_entry->bus = b;

	/* Remember the root so pcifront_free_roots() can tear it down. */
	list_add(&bus_entry->list, &pdev->root_buses);

	/* pci_scan_bus_parented skips devices which do not have
	 * devfn == 0.  pcifront_scan_bus enumerates all devfn. */
	err = pcifront_scan_bus(pdev, domain, bus, b);

	/* Claim resources before going "live" with our devices */
	pci_walk_bus(b, pcifront_claim_resource, pdev);

	/* Create SysFS and notify udev of the devices. Aka: "going live" */
	pci_bus_add_devices(b);

	return err;

err_out:
	/* kfree(NULL) is a no-op, so partial allocation is handled here. */
	kfree(bus_entry);
	kfree(sd);

	return err;
}
| 492 | |||
/* Re-enumerate an existing root (domain:bus) after the backend
 * reconfigured it, picking up newly added functions; creates the root
 * from scratch when it is not known yet.  Returns 0 or a negative errno.
 */
static int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
					  unsigned int domain, unsigned int bus)
{
	int err;
	struct pci_bus *b;

#ifndef CONFIG_PCI_DOMAINS
	/* Without PCI domain support only domain 0 can be represented. */
	if (domain != 0) {
		dev_err(&pdev->xdev->dev,
			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
		dev_err(&pdev->xdev->dev,
			"Please compile with CONFIG_PCI_DOMAINS\n");
		return -EINVAL;
	}
#endif

	dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
		 domain, bus);

	b = pci_find_bus(domain, bus);
	if (!b)
		/* If the bus is unknown, create it. */
		return pcifront_scan_root(pdev, domain, bus);

	/* Enumerate all devfn; already-known devices are skipped inside. */
	err = pcifront_scan_bus(pdev, domain, bus, b);

	/* Claim resources before going "live" with our devices */
	pci_walk_bus(b, pcifront_claim_resource, pdev);

	/* Create SysFS and notify udev of the devices. Aka: "going live" */
	pci_bus_add_devices(b);

	return err;
}
| 527 | |||
| 528 | static void free_root_bus_devs(struct pci_bus *bus) | ||
| 529 | { | ||
| 530 | struct pci_dev *dev; | ||
| 531 | |||
| 532 | while (!list_empty(&bus->devices)) { | ||
| 533 | dev = container_of(bus->devices.next, struct pci_dev, | ||
| 534 | bus_list); | ||
| 535 | dev_dbg(&dev->dev, "removing device\n"); | ||
| 536 | pci_remove_bus_device(dev); | ||
| 537 | } | ||
| 538 | } | ||
| 539 | |||
/* Tear down every root bus created by pcifront_scan_root(): remove the
 * devices, free the per-bus sysdata, and unregister the bus itself.
 */
static void pcifront_free_roots(struct pcifront_device *pdev)
{
	struct pci_bus_entry *bus_entry, *t;

	dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");

	list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
		list_del(&bus_entry->list);

		free_root_bus_devs(bus_entry->bus);

		/* sysdata is the pcifront_sd kmalloc'd in pcifront_scan_root.
		 * NOTE(review): it is freed while the bus is still registered;
		 * assumes nothing dereferences sysdata past this point —
		 * confirm against pcifront_bus_ops. */
		kfree(bus_entry->bus->sysdata);

		device_unregister(bus_entry->bus->bridge);
		pci_remove_bus(bus_entry->bus);

		kfree(bus_entry);
	}
}
| 559 | |||
| 560 | static pci_ers_result_t pcifront_common_process(int cmd, | ||
| 561 | struct pcifront_device *pdev, | ||
| 562 | pci_channel_state_t state) | ||
| 563 | { | ||
| 564 | pci_ers_result_t result; | ||
| 565 | struct pci_driver *pdrv; | ||
| 566 | int bus = pdev->sh_info->aer_op.bus; | ||
| 567 | int devfn = pdev->sh_info->aer_op.devfn; | ||
| 568 | struct pci_dev *pcidev; | ||
| 569 | int flag = 0; | ||
| 570 | |||
| 571 | dev_dbg(&pdev->xdev->dev, | ||
| 572 | "pcifront AER process: cmd %x (bus:%x, devfn%x)", | ||
| 573 | cmd, bus, devfn); | ||
| 574 | result = PCI_ERS_RESULT_NONE; | ||
| 575 | |||
| 576 | pcidev = pci_get_bus_and_slot(bus, devfn); | ||
| 577 | if (!pcidev || !pcidev->driver) { | ||
| 578 | dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); | ||
| 579 | if (pcidev) | ||
| 580 | pci_dev_put(pcidev); | ||
| 581 | return result; | ||
| 582 | } | ||
| 583 | pdrv = pcidev->driver; | ||
| 584 | |||
| 585 | if (get_driver(&pdrv->driver)) { | ||
| 586 | if (pdrv->err_handler && pdrv->err_handler->error_detected) { | ||
| 587 | dev_dbg(&pcidev->dev, | ||
| 588 | "trying to call AER service\n"); | ||
| 589 | if (pcidev) { | ||
| 590 | flag = 1; | ||
| 591 | switch (cmd) { | ||
| 592 | case XEN_PCI_OP_aer_detected: | ||
| 593 | result = pdrv->err_handler-> | ||
| 594 | error_detected(pcidev, state); | ||
| 595 | break; | ||
| 596 | case XEN_PCI_OP_aer_mmio: | ||
| 597 | result = pdrv->err_handler-> | ||
| 598 | mmio_enabled(pcidev); | ||
| 599 | break; | ||
| 600 | case XEN_PCI_OP_aer_slotreset: | ||
| 601 | result = pdrv->err_handler-> | ||
| 602 | slot_reset(pcidev); | ||
| 603 | break; | ||
| 604 | case XEN_PCI_OP_aer_resume: | ||
| 605 | pdrv->err_handler->resume(pcidev); | ||
| 606 | break; | ||
| 607 | default: | ||
| 608 | dev_err(&pdev->xdev->dev, | ||
| 609 | "bad request in aer recovery " | ||
| 610 | "operation!\n"); | ||
| 611 | |||
| 612 | } | ||
| 613 | } | ||
| 614 | } | ||
| 615 | put_driver(&pdrv->driver); | ||
| 616 | } | ||
| 617 | if (!flag) | ||
| 618 | result = PCI_ERS_RESULT_NONE; | ||
| 619 | |||
| 620 | return result; | ||
| 621 | } | ||
| 622 | |||
| 623 | |||
/* Workqueue handler: execute the AER request the backend published in
 * sh_info->aer_op, write the result back into the shared page, and notify
 * the backend over the event channel.
 */
static void pcifront_do_aer(struct work_struct *data)
{
	struct pcifront_device *pdev =
		container_of(data, struct pcifront_device, op_work);
	int cmd = pdev->sh_info->aer_op.cmd;
	pci_channel_state_t state =
		(pci_channel_state_t)pdev->sh_info->aer_op.err;

	/*If a pci_conf op is in progress,
	we have to wait until it is done before service aer op*/
	dev_dbg(&pdev->xdev->dev,
		"pcifront service aer bus %x devfn %x\n",
		pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn);

	pdev->sh_info->aer_op.err = pcifront_common_process(cmd, pdev, state);

	/* Post the operation to the guest. */
	/* wmb() orders the result write above before the flag clear below,
	 * which is what the backend polls on. */
	wmb();
	clear_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_evtchn(pdev->evtchn);

	/*in case of we lost an aer request in four lines time_window*/
	smp_mb__before_clear_bit();
	clear_bit(_PDEVB_op_active, &pdev->flags);
	smp_mb__after_clear_bit();

	/* Re-check for a request that raced with clearing the busy flag. */
	schedule_pcifront_aer_op(pdev);

}
| 653 | |||
| 654 | static irqreturn_t pcifront_handler_aer(int irq, void *dev) | ||
| 655 | { | ||
| 656 | struct pcifront_device *pdev = dev; | ||
| 657 | schedule_pcifront_aer_op(pdev); | ||
| 658 | return IRQ_HANDLED; | ||
| 659 | } | ||
| 660 | static int pcifront_connect(struct pcifront_device *pdev) | ||
| 661 | { | ||
| 662 | int err = 0; | ||
| 663 | |||
| 664 | spin_lock(&pcifront_dev_lock); | ||
| 665 | |||
| 666 | if (!pcifront_dev) { | ||
| 667 | dev_info(&pdev->xdev->dev, "Installing PCI frontend\n"); | ||
| 668 | pcifront_dev = pdev; | ||
| 669 | } else { | ||
| 670 | dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n"); | ||
| 671 | err = -EEXIST; | ||
| 672 | } | ||
| 673 | |||
| 674 | spin_unlock(&pcifront_dev_lock); | ||
| 675 | |||
| 676 | return err; | ||
| 677 | } | ||
| 678 | |||
| 679 | static void pcifront_disconnect(struct pcifront_device *pdev) | ||
| 680 | { | ||
| 681 | spin_lock(&pcifront_dev_lock); | ||
| 682 | |||
| 683 | if (pdev == pcifront_dev) { | ||
| 684 | dev_info(&pdev->xdev->dev, | ||
| 685 | "Disconnecting PCI Frontend Buses\n"); | ||
| 686 | pcifront_dev = NULL; | ||
| 687 | } | ||
| 688 | |||
| 689 | spin_unlock(&pcifront_dev_lock); | ||
| 690 | } | ||
/* Allocate a pcifront_device for @xdev together with the page shared with
 * the backend.  Event channel, grant reference and irq start at their
 * "invalid" sentinels; pcifront_publish_info() fills them in later.
 * Returns the new device or NULL on allocation failure.
 */
static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
{
	struct pcifront_device *pdev;

	pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
	if (pdev == NULL)
		goto out;

	/* One whole page is shared with the backend for operations. */
	pdev->sh_info =
		(struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
	if (pdev->sh_info == NULL) {
		kfree(pdev);
		pdev = NULL;
		goto out;
	}
	pdev->sh_info->flags = 0;

	/*Flag for registering PV AER handler*/
	set_bit(_XEN_PCIB_AERHANDLER, (void *)&pdev->sh_info->flags);

	dev_set_drvdata(&xdev->dev, pdev);
	pdev->xdev = xdev;

	INIT_LIST_HEAD(&pdev->root_buses);

	spin_lock_init(&pdev->sh_info_lock);

	/* Not yet connected to the backend. */
	pdev->evtchn = INVALID_EVTCHN;
	pdev->gnt_ref = INVALID_GRANT_REF;
	pdev->irq = -1;

	INIT_WORK(&pdev->op_work, pcifront_do_aer);

	dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
		pdev, pdev->sh_info);
out:
	return pdev;
}
| 729 | |||
/* Undo alloc_pdev()/pcifront_publish_info(): tear down the root buses, the
 * pending AER work, the irq/event channel and the shared page, then free
 * the structure itself.
 */
static void free_pdev(struct pcifront_device *pdev)
{
	dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);

	pcifront_free_roots(pdev);

	/*For PCIE_AER error handling job*/
	/* NOTE(review): the irq is still bound at this point, so the handler
	 * could re-queue op_work after this flush — confirm the ordering
	 * versus the unbind below. */
	flush_scheduled_work();

	if (pdev->irq >= 0)
		unbind_from_irqhandler(pdev->irq, pdev);

	if (pdev->evtchn != INVALID_EVTCHN)
		xenbus_free_evtchn(pdev->xdev, pdev->evtchn);

	/* If the page was granted, ending foreign access also frees it;
	 * otherwise we still own it and free it directly. */
	if (pdev->gnt_ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */,
					  (unsigned long)pdev->sh_info);
	else
		free_page((unsigned long)pdev->sh_info);

	dev_set_drvdata(&pdev->xdev->dev, NULL);

	kfree(pdev);
}
| 755 | |||
| 756 | static int pcifront_publish_info(struct pcifront_device *pdev) | ||
| 757 | { | ||
| 758 | int err = 0; | ||
| 759 | struct xenbus_transaction trans; | ||
| 760 | |||
| 761 | err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info)); | ||
| 762 | if (err < 0) | ||
| 763 | goto out; | ||
| 764 | |||
| 765 | pdev->gnt_ref = err; | ||
| 766 | |||
| 767 | err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn); | ||
| 768 | if (err) | ||
| 769 | goto out; | ||
| 770 | |||
| 771 | err = bind_evtchn_to_irqhandler(pdev->evtchn, pcifront_handler_aer, | ||
| 772 | 0, "pcifront", pdev); | ||
| 773 | |||
| 774 | if (err < 0) | ||
| 775 | return err; | ||
| 776 | |||
| 777 | pdev->irq = err; | ||
| 778 | |||
| 779 | do_publish: | ||
| 780 | err = xenbus_transaction_start(&trans); | ||
| 781 | if (err) { | ||
| 782 | xenbus_dev_fatal(pdev->xdev, err, | ||
| 783 | "Error writing configuration for backend " | ||
| 784 | "(start transaction)"); | ||
| 785 | goto out; | ||
| 786 | } | ||
| 787 | |||
| 788 | err = xenbus_printf(trans, pdev->xdev->nodename, | ||
| 789 | "pci-op-ref", "%u", pdev->gnt_ref); | ||
| 790 | if (!err) | ||
| 791 | err = xenbus_printf(trans, pdev->xdev->nodename, | ||
| 792 | "event-channel", "%u", pdev->evtchn); | ||
| 793 | if (!err) | ||
| 794 | err = xenbus_printf(trans, pdev->xdev->nodename, | ||
| 795 | "magic", XEN_PCI_MAGIC); | ||
| 796 | |||
| 797 | if (err) { | ||
| 798 | xenbus_transaction_end(trans, 1); | ||
| 799 | xenbus_dev_fatal(pdev->xdev, err, | ||
| 800 | "Error writing configuration for backend"); | ||
| 801 | goto out; | ||
| 802 | } else { | ||
| 803 | err = xenbus_transaction_end(trans, 0); | ||
| 804 | if (err == -EAGAIN) | ||
| 805 | goto do_publish; | ||
| 806 | else if (err) { | ||
| 807 | xenbus_dev_fatal(pdev->xdev, err, | ||
| 808 | "Error completing transaction " | ||
| 809 | "for backend"); | ||
| 810 | goto out; | ||
| 811 | } | ||
| 812 | } | ||
| 813 | |||
| 814 | xenbus_switch_state(pdev->xdev, XenbusStateInitialised); | ||
| 815 | |||
| 816 | dev_dbg(&pdev->xdev->dev, "publishing successful!\n"); | ||
| 817 | |||
| 818 | out: | ||
| 819 | return err; | ||
| 820 | } | ||
| 821 | |||
/* Move the frontend from Initialised to Connected: install @pdev as the
 * global instance, read the roots the backend published in xenstore
 * ("root_num" / "root-%d" = "domain:bus" in hex) and scan each one.
 * Falls back to root 0000:00 when the backend published no root list.
 */
static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	char str[64];
	unsigned int domain, bus;


	/* Only connect once */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateInitialised)
		goto out;

	err = pcifront_connect(pdev);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error connecting PCI Frontend");
		goto out;
	}

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* No root list in xenstore: assume the default root. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_scan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		/* Each root-%d node holds "domain:bus" in hex. */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_scan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	return err;
}
| 888 | |||
| 889 | static int pcifront_try_disconnect(struct pcifront_device *pdev) | ||
| 890 | { | ||
| 891 | int err = 0; | ||
| 892 | enum xenbus_state prev_state; | ||
| 893 | |||
| 894 | |||
| 895 | prev_state = xenbus_read_driver_state(pdev->xdev->nodename); | ||
| 896 | |||
| 897 | if (prev_state >= XenbusStateClosing) | ||
| 898 | goto out; | ||
| 899 | |||
| 900 | if (prev_state == XenbusStateConnected) { | ||
| 901 | pcifront_free_roots(pdev); | ||
| 902 | pcifront_disconnect(pdev); | ||
| 903 | } | ||
| 904 | |||
| 905 | err = xenbus_switch_state(pdev->xdev, XenbusStateClosed); | ||
| 906 | |||
| 907 | out: | ||
| 908 | |||
| 909 | return err; | ||
| 910 | } | ||
| 911 | |||
/* Finish a backend reconfiguration: rescan every published root so newly
 * attached devices appear, then return to XenbusStateConnected.  Mirrors
 * pcifront_try_connect() but uses pcifront_rescan_root().
 */
static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	unsigned int domain, bus;
	char str[64];

	/* Only act while the backend is mid-reconfiguration. */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateReconfiguring)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* No root list in xenstore: assume the default root. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_rescan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		/* Each root-%d node holds "domain:bus" in hex. */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_rescan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	return err;
}
| 969 | |||
/* Backend started reconfiguring: walk the backend's device list in xenstore
 * ("num_devs", "state-%d", "vdev-%d"), remove every device the backend
 * marked Closing, then acknowledge with XenbusStateReconfiguring.
 */
static int pcifront_detach_devices(struct pcifront_device *pdev)
{
	int err = 0;
	int i, num_devs;
	unsigned int domain, bus, slot, func;
	struct pci_bus *pci_bus;
	struct pci_dev *pci_dev;
	char str[64];

	/* Only act from the Connected state. */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateConnected)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
			   &num_devs);
	if (err != 1) {
		if (err >= 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI devices");
		goto out;
	}

	/* Find devices being detached and remove them. */
	for (i = 0; i < num_devs; i++) {
		int l, state;
		l = snprintf(str, sizeof(str), "state-%d", i);
		if (unlikely(l >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}
		/* A missing/unreadable state node means "leave it alone". */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
				   &state);
		if (err != 1)
			state = XenbusStateUnknown;

		if (state != XenbusStateClosing)
			continue;

		/* Remove device. */
		l = snprintf(str, sizeof(str), "vdev-%d", i);
		if (unlikely(l >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}
		/* vdev-%d holds the virtual BDF as "domain:bus:slot.func". */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
		if (err != 4) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI device %d", i);
			goto out;
		}

		pci_bus = pci_find_bus(domain, bus);
		if (!pci_bus) {
			dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
				domain, bus);
			continue;
		}
		pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
		if (!pci_dev) {
			dev_dbg(&pdev->xdev->dev,
				"Cannot get PCI device %04x:%02x:%02x.%02x\n",
				domain, bus, slot, func);
			continue;
		}
		pci_remove_bus_device(pci_dev);
		/* Drop the reference taken by pci_get_slot(). */
		pci_dev_put(pci_dev);

		dev_dbg(&pdev->xdev->dev,
			"PCI device %04x:%02x:%02x.%02x removed.\n",
			domain, bus, slot, func);
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);

out:
	return err;
}
| 1051 | |||
/* xenbus otherend_changed callback: react to backend state transitions. */
static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
						  enum xenbus_state be_state)
{
	struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);

	switch (be_state) {
	/* States that require no action from the frontend. */
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		pcifront_try_connect(pdev);
		break;

	case XenbusStateClosing:
		dev_warn(&xdev->dev, "backend going away!\n");
		pcifront_try_disconnect(pdev);
		break;

	/* Backend is removing devices: detach them... */
	case XenbusStateReconfiguring:
		pcifront_detach_devices(pdev);
		break;

	/* ...and re-attach what remains once it is done. */
	case XenbusStateReconfigured:
		pcifront_attach_devices(pdev);
		break;
	}
}
| 1083 | |||
| 1084 | static int pcifront_xenbus_probe(struct xenbus_device *xdev, | ||
| 1085 | const struct xenbus_device_id *id) | ||
| 1086 | { | ||
| 1087 | int err = 0; | ||
| 1088 | struct pcifront_device *pdev = alloc_pdev(xdev); | ||
| 1089 | |||
| 1090 | if (pdev == NULL) { | ||
| 1091 | err = -ENOMEM; | ||
| 1092 | xenbus_dev_fatal(xdev, err, | ||
| 1093 | "Error allocating pcifront_device struct"); | ||
| 1094 | goto out; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | err = pcifront_publish_info(pdev); | ||
| 1098 | if (err) | ||
| 1099 | free_pdev(pdev); | ||
| 1100 | |||
| 1101 | out: | ||
| 1102 | return err; | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | static int pcifront_xenbus_remove(struct xenbus_device *xdev) | ||
| 1106 | { | ||
| 1107 | struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev); | ||
| 1108 | if (pdev) | ||
| 1109 | free_pdev(pdev); | ||
| 1110 | |||
| 1111 | return 0; | ||
| 1112 | } | ||
| 1113 | |||
/* xenstore device type this frontend binds to. */
static const struct xenbus_device_id xenpci_ids[] = {
	{"pci"},
	{""},	/* list terminator */
};
| 1118 | |||
/* xenbus driver glue tying the probe/remove/state-change callbacks above
 * to devices of type "pci". */
static struct xenbus_driver xenbus_pcifront_driver = {
	.name			= "pcifront",
	.owner			= THIS_MODULE,
	.ids			= xenpci_ids,
	.probe			= pcifront_xenbus_probe,
	.remove			= pcifront_xenbus_remove,
	.otherend_changed	= pcifront_backend_changed,
};
| 1127 | |||
/* Module init: only meaningful in a Xen PV guest (dom0 owns the real PCI
 * bus); registers the frontend ops and the xenbus driver. */
static int __init pcifront_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	pci_frontend_registrar(1 /* enable */);

	return xenbus_register_frontend(&xenbus_pcifront_driver);
}
| 1137 | |||
/* Module exit: unregister in reverse order of pcifront_init(). */
static void __exit pcifront_cleanup(void)
{
	xenbus_unregister_driver(&xenbus_pcifront_driver);
	pci_frontend_registrar(0 /* disable */);
}
module_init(pcifront_init);
module_exit(pcifront_cleanup);

MODULE_DESCRIPTION("Xen PCI passthrough frontend.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:pci");
