aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/pci')
-rw-r--r--drivers/pci/Kconfig10
-rw-r--r--drivers/pci/Makefile6
-rw-r--r--drivers/pci/access.c26
-rw-r--r--drivers/pci/bus.c26
-rw-r--r--drivers/pci/dmar.c379
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c58
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c77
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c1
-rw-r--r--drivers/pci/hotplug/cpqphp.h167
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c1100
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c371
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c97
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c599
-rw-r--r--drivers/pci/hotplug/cpqphp_sysfs.c3
-rw-r--r--drivers/pci/hotplug/fakephp.c444
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c56
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c155
-rw-r--r--drivers/pci/hotplug/pciehp.h16
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c21
-rw-r--r--drivers/pci/hotplug/pciehp_core.c130
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c65
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c1
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c1
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c5
-rw-r--r--drivers/pci/hotplug/shpchp.h10
-rw-r--r--drivers/pci/hotplug/shpchp_core.c1
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c2
-rw-r--r--drivers/pci/htirq.c5
-rw-r--r--drivers/pci/intel-iommu.c638
-rw-r--r--drivers/pci/intr_remapping.c219
-rw-r--r--drivers/pci/iov.c683
-rw-r--r--drivers/pci/msi.c512
-rw-r--r--drivers/pci/msi.h20
-rw-r--r--drivers/pci/pci-acpi.c219
-rw-r--r--drivers/pci/pci-driver.c261
-rw-r--r--drivers/pci/pci-sysfs.c134
-rw-r--r--drivers/pci/pci.c587
-rw-r--r--drivers/pci/pci.h66
-rw-r--r--drivers/pci/pcie/aer/Kconfig15
-rw-r--r--drivers/pci/pcie/aer/Kconfig.debug18
-rw-r--r--drivers/pci/pcie/aer/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c473
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c31
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c288
-rw-r--r--drivers/pci/pcie/aer/ecrc.c131
-rw-r--r--drivers/pci/pcie/aspm.c787
-rw-r--r--drivers/pci/pcie/portdrv.h14
-rw-r--r--drivers/pci/pcie/portdrv_bus.c18
-rw-r--r--drivers/pci/pcie/portdrv_core.c381
-rw-r--r--drivers/pci/pcie/portdrv_pci.c52
-rw-r--r--drivers/pci/probe.c225
-rw-r--r--drivers/pci/quirks.c300
-rw-r--r--drivers/pci/remove.c6
-rw-r--r--drivers/pci/search.c32
-rw-r--r--drivers/pci/setup-bus.c66
-rw-r--r--drivers/pci/setup-res.c68
-rw-r--r--drivers/pci/slot.c61
62 files changed, 6277 insertions, 3880 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2a4501dd2515..fdc864f9cf23 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -59,3 +59,13 @@ config HT_IRQ
59 This allows native hypertransport devices to use interrupts. 59 This allows native hypertransport devices to use interrupts.
60 60
61 If unsure say Y. 61 If unsure say Y.
62
63config PCI_IOV
64 bool "PCI IOV support"
65 depends on PCI
66 help
67 I/O Virtualization is a PCI feature supported by some devices
68 which allows them to create virtual devices which share their
69 physical resources.
70
71 If unsure, say N.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d07ce24f6a8..1ebd6b4c743b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
2# Makefile for the PCI bus specific drivers. 2# Makefile for the PCI bus specific drivers.
3# 3#
4 4
5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ 5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
7 irq.o 7 irq.o
8obj-$(CONFIG_PROC_FS) += proc.o 8obj-$(CONFIG_PROC_FS) += proc.o
9obj-$(CONFIG_SYSFS) += slot.o
9 10
10# Build PCI Express stuff if needed 11# Build PCI Express stuff if needed
11obj-$(CONFIG_PCIEPORTBUS) += pcie/ 12obj-$(CONFIG_PCIEPORTBUS) += pcie/
@@ -29,6 +30,8 @@ obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
29 30
30obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o 31obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
31 32
33obj-$(CONFIG_PCI_IOV) += iov.o
34
32# 35#
33# Some architectures use the generic PCI setup functions 36# Some architectures use the generic PCI setup functions
34# 37#
@@ -37,7 +40,6 @@ obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
37obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o 40obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
38obj-$(CONFIG_PARISC) += setup-bus.o 41obj-$(CONFIG_PARISC) += setup-bus.o
39obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o 42obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
40obj-$(CONFIG_PPC32) += setup-irq.o
41obj-$(CONFIG_PPC) += setup-bus.o 43obj-$(CONFIG_PPC) += setup-bus.o
42obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 44obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
43obj-$(CONFIG_X86_VISWS) += setup-irq.o 45obj-$(CONFIG_X86_VISWS) += setup-irq.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 381444794778..db23200c4874 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
66EXPORT_SYMBOL(pci_bus_write_config_word); 66EXPORT_SYMBOL(pci_bus_write_config_word);
67EXPORT_SYMBOL(pci_bus_write_config_dword); 67EXPORT_SYMBOL(pci_bus_write_config_dword);
68 68
69/**
70 * pci_bus_set_ops - Set raw operations of pci bus
71 * @bus: pci bus struct
72 * @ops: new raw operations
73 *
74 * Return previous raw operations
75 */
76struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
77{
78 struct pci_ops *old_ops;
79 unsigned long flags;
80
81 spin_lock_irqsave(&pci_lock, flags);
82 old_ops = bus->ops;
83 bus->ops = ops;
84 spin_unlock_irqrestore(&pci_lock, flags);
85 return old_ops;
86}
87EXPORT_SYMBOL(pci_bus_set_ops);
69 88
70/** 89/**
71 * pci_read_vpd - Read one entry from Vital Product Data 90 * pci_read_vpd - Read one entry from Vital Product Data
@@ -87,8 +106,8 @@ EXPORT_SYMBOL(pci_read_vpd);
87 * pci_write_vpd - Write entry to Vital Product Data 106 * pci_write_vpd - Write entry to Vital Product Data
88 * @dev: pci device struct 107 * @dev: pci device struct
89 * @pos: offset in vpd space 108 * @pos: offset in vpd space
90 * @count: number of bytes to read 109 * @count: number of bytes to write
91 * @val: value to write 110 * @buf: buffer containing write data
92 * 111 *
93 */ 112 */
94ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf) 113ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
@@ -356,7 +375,8 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
356 return -EINVAL; 375 return -EINVAL;
357 376
358 dev->vpd->len = size; 377 dev->vpd->len = size;
359 dev->vpd->attr->size = size; 378 if (dev->vpd->attr)
379 dev->vpd->attr->size = size;
360 380
361 return 0; 381 return 0;
362} 382}
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 52b54f053be0..cef28a79103f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
41 void *alignf_data) 41 void *alignf_data)
42{ 42{
43 int i, ret = -ENOMEM; 43 int i, ret = -ENOMEM;
44 resource_size_t max = -1;
44 45
45 type_mask |= IORESOURCE_IO | IORESOURCE_MEM; 46 type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
46 47
48 /* don't allocate too high if the pref mem doesn't support 64bit*/
49 if (!(res->flags & IORESOURCE_MEM_64))
50 max = PCIBIOS_MAX_MEM_32;
51
47 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 52 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
48 struct resource *r = bus->resource[i]; 53 struct resource *r = bus->resource[i];
49 if (!r) 54 if (!r)
@@ -62,7 +67,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
62 /* Ok, try it out.. */ 67 /* Ok, try it out.. */
63 ret = allocate_resource(r, res, size, 68 ret = allocate_resource(r, res, size,
64 r->start ? : min, 69 r->start ? : min,
65 -1, align, 70 max, align,
66 alignf, alignf_data); 71 alignf, alignf_data);
67 if (ret == 0) 72 if (ret == 0)
68 break; 73 break;
@@ -133,7 +138,7 @@ int pci_bus_add_child(struct pci_bus *bus)
133 * 138 *
134 * Call hotplug for each new devices. 139 * Call hotplug for each new devices.
135 */ 140 */
136void pci_bus_add_devices(struct pci_bus *bus) 141void pci_bus_add_devices(const struct pci_bus *bus)
137{ 142{
138 struct pci_dev *dev; 143 struct pci_dev *dev;
139 struct pci_bus *child; 144 struct pci_bus *child;
@@ -184,8 +189,10 @@ void pci_enable_bridges(struct pci_bus *bus)
184 189
185 list_for_each_entry(dev, &bus->devices, bus_list) { 190 list_for_each_entry(dev, &bus->devices, bus_list) {
186 if (dev->subordinate) { 191 if (dev->subordinate) {
187 retval = pci_enable_device(dev); 192 if (!pci_is_enabled(dev)) {
188 pci_set_master(dev); 193 retval = pci_enable_device(dev);
194 pci_set_master(dev);
195 }
189 pci_enable_bridges(dev->subordinate); 196 pci_enable_bridges(dev->subordinate);
190 } 197 }
191 } 198 }
@@ -199,13 +206,18 @@ void pci_enable_bridges(struct pci_bus *bus)
199 * Walk the given bus, including any bridged devices 206 * Walk the given bus, including any bridged devices
200 * on buses under this bus. Call the provided callback 207 * on buses under this bus. Call the provided callback
201 * on each device found. 208 * on each device found.
209 *
210 * We check the return of @cb each time. If it returns anything
211 * other than 0, we break out.
212 *
202 */ 213 */
203void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), 214void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
204 void *userdata) 215 void *userdata)
205{ 216{
206 struct pci_dev *dev; 217 struct pci_dev *dev;
207 struct pci_bus *bus; 218 struct pci_bus *bus;
208 struct list_head *next; 219 struct list_head *next;
220 int retval;
209 221
210 bus = top; 222 bus = top;
211 down_read(&pci_bus_sem); 223 down_read(&pci_bus_sem);
@@ -229,8 +241,10 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
229 241
230 /* Run device routines with the device locked */ 242 /* Run device routines with the device locked */
231 down(&dev->dev.sem); 243 down(&dev->dev.sem);
232 cb(dev, userdata); 244 retval = cb(dev, userdata);
233 up(&dev->dev.sem); 245 up(&dev->dev.sem);
246 if (retval)
247 break;
234 } 248 }
235 up_read(&pci_bus_sem); 249 up_read(&pci_bus_sem);
236} 250}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 26c536b51c5a..fa3a11365ec3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -31,6 +31,8 @@
31#include <linux/iova.h> 31#include <linux/iova.h>
32#include <linux/intel-iommu.h> 32#include <linux/intel-iommu.h>
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/irq.h>
35#include <linux/interrupt.h>
34 36
35#undef PREFIX 37#undef PREFIX
36#define PREFIX "DMAR:" 38#define PREFIX "DMAR:"
@@ -42,6 +44,7 @@
42LIST_HEAD(dmar_drhd_units); 44LIST_HEAD(dmar_drhd_units);
43 45
44static struct acpi_table_header * __initdata dmar_tbl; 46static struct acpi_table_header * __initdata dmar_tbl;
47static acpi_size dmar_tbl_size;
45 48
46static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) 49static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
47{ 50{
@@ -170,13 +173,23 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
170 struct dmar_drhd_unit *dmaru; 173 struct dmar_drhd_unit *dmaru;
171 int ret = 0; 174 int ret = 0;
172 175
176 drhd = (struct acpi_dmar_hardware_unit *)header;
177 if (!drhd->address) {
178 /* Promote an attitude of violence to a BIOS engineer today */
179 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
180 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
181 dmi_get_system_info(DMI_BIOS_VENDOR),
182 dmi_get_system_info(DMI_BIOS_VERSION),
183 dmi_get_system_info(DMI_PRODUCT_VERSION));
184 return -ENODEV;
185 }
173 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); 186 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
174 if (!dmaru) 187 if (!dmaru)
175 return -ENOMEM; 188 return -ENOMEM;
176 189
177 dmaru->hdr = header; 190 dmaru->hdr = header;
178 drhd = (struct acpi_dmar_hardware_unit *)header;
179 dmaru->reg_base_addr = drhd->address; 191 dmaru->reg_base_addr = drhd->address;
192 dmaru->segment = drhd->segment;
180 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ 193 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
181 194
182 ret = alloc_iommu(dmaru); 195 ret = alloc_iommu(dmaru);
@@ -288,8 +301,9 @@ static int __init dmar_table_detect(void)
288 acpi_status status = AE_OK; 301 acpi_status status = AE_OK;
289 302
290 /* if we could find DMAR table, then there are DMAR devices */ 303 /* if we could find DMAR table, then there are DMAR devices */
291 status = acpi_get_table(ACPI_SIG_DMAR, 0, 304 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
292 (struct acpi_table_header **)&dmar_tbl); 305 (struct acpi_table_header **)&dmar_tbl,
306 &dmar_tbl_size);
293 307
294 if (ACPI_SUCCESS(status) && !dmar_tbl) { 308 if (ACPI_SUCCESS(status) && !dmar_tbl) {
295 printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); 309 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -489,6 +503,7 @@ void __init detect_intel_iommu(void)
489 iommu_detected = 1; 503 iommu_detected = 1;
490#endif 504#endif
491 } 505 }
506 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
492 dmar_tbl = NULL; 507 dmar_tbl = NULL;
493} 508}
494 509
@@ -506,6 +521,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
506 return -ENOMEM; 521 return -ENOMEM;
507 522
508 iommu->seq_id = iommu_allocated++; 523 iommu->seq_id = iommu_allocated++;
524 sprintf (iommu->name, "dmar%d", iommu->seq_id);
509 525
510 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); 526 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
511 if (!iommu->reg) { 527 if (!iommu->reg) {
@@ -748,14 +764,77 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
748} 764}
749 765
750/* 766/*
767 * Disable Queued Invalidation interface.
768 */
769void dmar_disable_qi(struct intel_iommu *iommu)
770{
771 unsigned long flags;
772 u32 sts;
773 cycles_t start_time = get_cycles();
774
775 if (!ecap_qis(iommu->ecap))
776 return;
777
778 spin_lock_irqsave(&iommu->register_lock, flags);
779
780 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
781 if (!(sts & DMA_GSTS_QIES))
782 goto end;
783
784 /*
785 * Give a chance to HW to complete the pending invalidation requests.
786 */
787 while ((readl(iommu->reg + DMAR_IQT_REG) !=
788 readl(iommu->reg + DMAR_IQH_REG)) &&
789 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
790 cpu_relax();
791
792 iommu->gcmd &= ~DMA_GCMD_QIE;
793
794 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
795
796 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
797 !(sts & DMA_GSTS_QIES), sts);
798end:
799 spin_unlock_irqrestore(&iommu->register_lock, flags);
800}
801
802/*
803 * Enable queued invalidation.
804 */
805static void __dmar_enable_qi(struct intel_iommu *iommu)
806{
807 u32 cmd, sts;
808 unsigned long flags;
809 struct q_inval *qi = iommu->qi;
810
811 qi->free_head = qi->free_tail = 0;
812 qi->free_cnt = QI_LENGTH;
813
814 spin_lock_irqsave(&iommu->register_lock, flags);
815
816 /* write zero to the tail reg */
817 writel(0, iommu->reg + DMAR_IQT_REG);
818
819 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
820
821 cmd = iommu->gcmd | DMA_GCMD_QIE;
822 iommu->gcmd |= DMA_GCMD_QIE;
823 writel(cmd, iommu->reg + DMAR_GCMD_REG);
824
825 /* Make sure hardware complete it */
826 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
827
828 spin_unlock_irqrestore(&iommu->register_lock, flags);
829}
830
831/*
751 * Enable Queued Invalidation interface. This is a must to support 832 * Enable Queued Invalidation interface. This is a must to support
752 * interrupt-remapping. Also used by DMA-remapping, which replaces 833 * interrupt-remapping. Also used by DMA-remapping, which replaces
753 * register based IOTLB invalidation. 834 * register based IOTLB invalidation.
754 */ 835 */
755int dmar_enable_qi(struct intel_iommu *iommu) 836int dmar_enable_qi(struct intel_iommu *iommu)
756{ 837{
757 u32 cmd, sts;
758 unsigned long flags;
759 struct q_inval *qi; 838 struct q_inval *qi;
760 839
761 if (!ecap_qis(iommu->ecap)) 840 if (!ecap_qis(iommu->ecap))
@@ -767,20 +846,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
767 if (iommu->qi) 846 if (iommu->qi)
768 return 0; 847 return 0;
769 848
770 iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL); 849 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
771 if (!iommu->qi) 850 if (!iommu->qi)
772 return -ENOMEM; 851 return -ENOMEM;
773 852
774 qi = iommu->qi; 853 qi = iommu->qi;
775 854
776 qi->desc = (void *)(get_zeroed_page(GFP_KERNEL)); 855 qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
777 if (!qi->desc) { 856 if (!qi->desc) {
778 kfree(qi); 857 kfree(qi);
779 iommu->qi = 0; 858 iommu->qi = 0;
780 return -ENOMEM; 859 return -ENOMEM;
781 } 860 }
782 861
783 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL); 862 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
784 if (!qi->desc_status) { 863 if (!qi->desc_status) {
785 free_page((unsigned long) qi->desc); 864 free_page((unsigned long) qi->desc);
786 kfree(qi); 865 kfree(qi);
@@ -793,19 +872,283 @@ int dmar_enable_qi(struct intel_iommu *iommu)
793 872
794 spin_lock_init(&qi->q_lock); 873 spin_lock_init(&qi->q_lock);
795 874
796 spin_lock_irqsave(&iommu->register_lock, flags); 875 __dmar_enable_qi(iommu);
797 /* write zero to the tail reg */
798 writel(0, iommu->reg + DMAR_IQT_REG);
799 876
800 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); 877 return 0;
878}
801 879
802 cmd = iommu->gcmd | DMA_GCMD_QIE; 880/* iommu interrupt handling. Most stuff are MSI-like. */
803 iommu->gcmd |= DMA_GCMD_QIE;
804 writel(cmd, iommu->reg + DMAR_GCMD_REG);
805 881
806 /* Make sure hardware complete it */ 882enum faulttype {
807 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); 883 DMA_REMAP,
808 spin_unlock_irqrestore(&iommu->register_lock, flags); 884 INTR_REMAP,
885 UNKNOWN,
886};
887
888static const char *dma_remap_fault_reasons[] =
889{
890 "Software",
891 "Present bit in root entry is clear",
892 "Present bit in context entry is clear",
893 "Invalid context entry",
894 "Access beyond MGAW",
895 "PTE Write access is not set",
896 "PTE Read access is not set",
897 "Next page table ptr is invalid",
898 "Root table address invalid",
899 "Context table ptr is invalid",
900 "non-zero reserved fields in RTP",
901 "non-zero reserved fields in CTP",
902 "non-zero reserved fields in PTE",
903};
904
905static const char *intr_remap_fault_reasons[] =
906{
907 "Detected reserved fields in the decoded interrupt-remapped request",
908 "Interrupt index exceeded the interrupt-remapping table size",
909 "Present field in the IRTE entry is clear",
910 "Error accessing interrupt-remapping table pointed by IRTA_REG",
911 "Detected reserved fields in the IRTE entry",
912 "Blocked a compatibility format interrupt request",
913 "Blocked an interrupt request due to source-id verification failure",
914};
915
916#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
917
918const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
919{
920 if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
921 ARRAY_SIZE(intr_remap_fault_reasons))) {
922 *fault_type = INTR_REMAP;
923 return intr_remap_fault_reasons[fault_reason - 0x20];
924 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
925 *fault_type = DMA_REMAP;
926 return dma_remap_fault_reasons[fault_reason];
927 } else {
928 *fault_type = UNKNOWN;
929 return "Unknown";
930 }
931}
932
933void dmar_msi_unmask(unsigned int irq)
934{
935 struct intel_iommu *iommu = get_irq_data(irq);
936 unsigned long flag;
937
938 /* unmask it */
939 spin_lock_irqsave(&iommu->register_lock, flag);
940 writel(0, iommu->reg + DMAR_FECTL_REG);
941 /* Read a reg to force flush the post write */
942 readl(iommu->reg + DMAR_FECTL_REG);
943 spin_unlock_irqrestore(&iommu->register_lock, flag);
944}
945
946void dmar_msi_mask(unsigned int irq)
947{
948 unsigned long flag;
949 struct intel_iommu *iommu = get_irq_data(irq);
950
951 /* mask it */
952 spin_lock_irqsave(&iommu->register_lock, flag);
953 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
954 /* Read a reg to force flush the post write */
955 readl(iommu->reg + DMAR_FECTL_REG);
956 spin_unlock_irqrestore(&iommu->register_lock, flag);
957}
958
959void dmar_msi_write(int irq, struct msi_msg *msg)
960{
961 struct intel_iommu *iommu = get_irq_data(irq);
962 unsigned long flag;
963
964 spin_lock_irqsave(&iommu->register_lock, flag);
965 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
966 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
967 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
968 spin_unlock_irqrestore(&iommu->register_lock, flag);
969}
970
971void dmar_msi_read(int irq, struct msi_msg *msg)
972{
973 struct intel_iommu *iommu = get_irq_data(irq);
974 unsigned long flag;
975
976 spin_lock_irqsave(&iommu->register_lock, flag);
977 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
978 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
979 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
980 spin_unlock_irqrestore(&iommu->register_lock, flag);
981}
982
983static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
984 u8 fault_reason, u16 source_id, unsigned long long addr)
985{
986 const char *reason;
987 int fault_type;
988
989 reason = dmar_get_fault_reason(fault_reason, &fault_type);
990
991 if (fault_type == INTR_REMAP)
992 printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
993 "fault index %llx\n"
994 "INTR-REMAP:[fault reason %02d] %s\n",
995 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
996 PCI_FUNC(source_id & 0xFF), addr >> 48,
997 fault_reason, reason);
998 else
999 printk(KERN_ERR
1000 "DMAR:[%s] Request device [%02x:%02x.%d] "
1001 "fault addr %llx \n"
1002 "DMAR:[fault reason %02d] %s\n",
1003 (type ? "DMA Read" : "DMA Write"),
1004 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1005 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1006 return 0;
1007}
1008
1009#define PRIMARY_FAULT_REG_LEN (16)
1010irqreturn_t dmar_fault(int irq, void *dev_id)
1011{
1012 struct intel_iommu *iommu = dev_id;
1013 int reg, fault_index;
1014 u32 fault_status;
1015 unsigned long flag;
1016
1017 spin_lock_irqsave(&iommu->register_lock, flag);
1018 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1019 if (fault_status)
1020 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
1021 fault_status);
1022
1023 /* TBD: ignore advanced fault log currently */
1024 if (!(fault_status & DMA_FSTS_PPF))
1025 goto clear_rest;
1026
1027 fault_index = dma_fsts_fault_record_index(fault_status);
1028 reg = cap_fault_reg_offset(iommu->cap);
1029 while (1) {
1030 u8 fault_reason;
1031 u16 source_id;
1032 u64 guest_addr;
1033 int type;
1034 u32 data;
1035
1036 /* highest 32 bits */
1037 data = readl(iommu->reg + reg +
1038 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1039 if (!(data & DMA_FRCD_F))
1040 break;
1041
1042 fault_reason = dma_frcd_fault_reason(data);
1043 type = dma_frcd_type(data);
1044
1045 data = readl(iommu->reg + reg +
1046 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1047 source_id = dma_frcd_source_id(data);
1048
1049 guest_addr = dmar_readq(iommu->reg + reg +
1050 fault_index * PRIMARY_FAULT_REG_LEN);
1051 guest_addr = dma_frcd_page_addr(guest_addr);
1052 /* clear the fault */
1053 writel(DMA_FRCD_F, iommu->reg + reg +
1054 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1055
1056 spin_unlock_irqrestore(&iommu->register_lock, flag);
1057
1058 dmar_fault_do_one(iommu, type, fault_reason,
1059 source_id, guest_addr);
1060
1061 fault_index++;
1062 if (fault_index > cap_num_fault_regs(iommu->cap))
1063 fault_index = 0;
1064 spin_lock_irqsave(&iommu->register_lock, flag);
1065 }
1066clear_rest:
1067 /* clear all the other faults */
1068 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1069 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1070
1071 spin_unlock_irqrestore(&iommu->register_lock, flag);
1072 return IRQ_HANDLED;
1073}
1074
1075int dmar_set_interrupt(struct intel_iommu *iommu)
1076{
1077 int irq, ret;
1078
1079 /*
1080 * Check if the fault interrupt is already initialized.
1081 */
1082 if (iommu->irq)
1083 return 0;
1084
1085 irq = create_irq();
1086 if (!irq) {
1087 printk(KERN_ERR "IOMMU: no free vectors\n");
1088 return -EINVAL;
1089 }
1090
1091 set_irq_data(irq, iommu);
1092 iommu->irq = irq;
1093
1094 ret = arch_setup_dmar_msi(irq);
1095 if (ret) {
1096 set_irq_data(irq, NULL);
1097 iommu->irq = 0;
1098 destroy_irq(irq);
1099 return 0;
1100 }
1101
1102 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
1103 if (ret)
1104 printk(KERN_ERR "IOMMU: can't request irq\n");
1105 return ret;
1106}
1107
1108int __init enable_drhd_fault_handling(void)
1109{
1110 struct dmar_drhd_unit *drhd;
1111
1112 /*
1113 * Enable fault control interrupt.
1114 */
1115 for_each_drhd_unit(drhd) {
1116 int ret;
1117 struct intel_iommu *iommu = drhd->iommu;
1118 ret = dmar_set_interrupt(iommu);
1119
1120 if (ret) {
1121 printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
1122 " interrupt, ret %d\n",
1123 (unsigned long long)drhd->reg_base_addr, ret);
1124 return -1;
1125 }
1126 }
1127
1128 return 0;
1129}
1130
1131/*
1132 * Re-enable Queued Invalidation interface.
1133 */
1134int dmar_reenable_qi(struct intel_iommu *iommu)
1135{
1136 if (!ecap_qis(iommu->ecap))
1137 return -ENOENT;
1138
1139 if (!iommu->qi)
1140 return -ENOENT;
1141
1142 /*
1143 * First disable queued invalidation.
1144 */
1145 dmar_disable_qi(iommu);
1146 /*
1147 * Then enable queued invalidation again. Since there is no pending
1148 * invalidation requests now, it's safe to re-enable queued
1149 * invalidation.
1150 */
1151 __dmar_enable_qi(iommu);
809 1152
810 return 0; 1153 return 0;
811} 1154}
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe100a0d..66f29bc00be4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig HOTPLUG_PCI 5menuconfig HOTPLUG_PCI
6 tristate "Support for PCI Hotplug" 6 tristate "Support for PCI Hotplug"
7 depends on PCI && HOTPLUG 7 depends on PCI && HOTPLUG && SYSFS
8 ---help--- 8 ---help---
9 Say Y here if you have a motherboard with a PCI Hotplug controller. 9 Say Y here if you have a motherboard with a PCI Hotplug controller.
10 This allows you to add and remove PCI cards while the machine is 10 This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@ config HOTPLUG_PCI_FAKE
41 41
42config HOTPLUG_PCI_COMPAQ 42config HOTPLUG_PCI_COMPAQ
43 tristate "Compaq PCI Hotplug driver" 43 tristate "Compaq PCI Hotplug driver"
44 depends on X86 && PCI_BIOS && PCI_LEGACY 44 depends on X86 && PCI_BIOS
45 help 45 help
46 Say Y here if you have a motherboard with a Compaq PCI Hotplug 46 Say Y here if you have a motherboard with a Compaq PCI Hotplug
47 controller. 47 controller.
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1c1141801060..fbc63d5e459f 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,9 +30,8 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/pci_hotplug.h> 32#include <linux/pci_hotplug.h>
33#include <linux/acpi.h>
33#include <linux/pci-acpi.h> 34#include <linux/pci-acpi.h>
34#include <acpi/acpi.h>
35#include <acpi/acpi_bus.h>
36 35
37#define MY_NAME "acpi_pcihp" 36#define MY_NAME "acpi_pcihp"
38 37
@@ -333,19 +332,14 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
333{ 332{
334 acpi_status status = AE_NOT_FOUND; 333 acpi_status status = AE_NOT_FOUND;
335 acpi_handle handle, phandle; 334 acpi_handle handle, phandle;
336 struct pci_bus *pbus = bus; 335 struct pci_bus *pbus;
337 struct pci_dev *pdev; 336
338 337 handle = NULL;
339 do { 338 for (pbus = bus; pbus; pbus = pbus->parent) {
340 pdev = pbus->self; 339 handle = acpi_pci_get_bridge_handle(pbus);
341 if (!pdev) { 340 if (handle)
342 handle = acpi_get_pci_rootbridge_handle(
343 pci_domain_nr(pbus), pbus->number);
344 break; 341 break;
345 } 342 }
346 handle = DEVICE_ACPI_HANDLE(&(pdev->dev));
347 pbus = pbus->parent;
348 } while (!handle);
349 343
350 /* 344 /*
351 * _HPP settings apply to all child buses, until another _HPP is 345 * _HPP settings apply to all child buses, until another _HPP is
@@ -378,12 +372,10 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
378 * 372 *
379 * Attempt to take hotplug control from firmware. 373 * Attempt to take hotplug control from firmware.
380 */ 374 */
381int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) 375int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
382{ 376{
383 acpi_status status; 377 acpi_status status;
384 acpi_handle chandle, handle; 378 acpi_handle chandle, handle;
385 struct pci_dev *pdev = dev;
386 struct pci_bus *parent;
387 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 379 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
388 380
389 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | 381 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
@@ -408,33 +400,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
408 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 400 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
409 dbg("Trying to get hotplug control for %s\n", 401 dbg("Trying to get hotplug control for %s\n",
410 (char *)string.pointer); 402 (char *)string.pointer);
411 status = pci_osc_control_set(handle, flags); 403 status = acpi_pci_osc_control_set(handle, flags);
412 if (ACPI_SUCCESS(status)) 404 if (ACPI_SUCCESS(status))
413 goto got_one; 405 goto got_one;
414 kfree(string.pointer); 406 kfree(string.pointer);
415 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; 407 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
416 } 408 }
417 409
418 pdev = dev; 410 handle = DEVICE_ACPI_HANDLE(&pdev->dev);
419 handle = DEVICE_ACPI_HANDLE(&dev->dev); 411 if (!handle) {
420 while (!handle) {
421 /* 412 /*
422 * This hotplug controller was not listed in the ACPI name 413 * This hotplug controller was not listed in the ACPI name
423 * space at all. Try to get acpi handle of parent pci bus. 414 * space at all. Try to get acpi handle of parent pci bus.
424 */ 415 */
425 if (!pdev || !pdev->bus->parent) 416 struct pci_bus *pbus;
426 break; 417 for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
427 parent = pdev->bus->parent; 418 handle = acpi_pci_get_bridge_handle(pbus);
428 dbg("Could not find %s in acpi namespace, trying parent\n", 419 if (handle)
429 pci_name(pdev)); 420 break;
430 if (!parent->self) 421 }
431 /* Parent must be a host bridge */
432 handle = acpi_get_pci_rootbridge_handle(
433 pci_domain_nr(parent),
434 parent->number);
435 else
436 handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
437 pdev = parent->self;
438 } 422 }
439 423
440 while (handle) { 424 while (handle) {
@@ -453,13 +437,13 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
453 } 437 }
454 438
455 dbg("Cannot get control of hotplug hardware for pci %s\n", 439 dbg("Cannot get control of hotplug hardware for pci %s\n",
456 pci_name(dev)); 440 pci_name(pdev));
457 441
458 kfree(string.pointer); 442 kfree(string.pointer);
459 return -ENODEV; 443 return -ENODEV;
460got_one: 444got_one:
461 dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev), 445 dbg("Gained control for hotplug HW for pci %s (%s)\n",
462 (char *)string.pointer); 446 pci_name(pdev), (char *)string.pointer);
463 kfree(string.pointer); 447 kfree(string.pointer);
464 return 0; 448 return 0;
465} 449}
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 4fc168b70095..e68d5f20ffb3 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -129,7 +129,6 @@ struct acpiphp_func {
129 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */ 129 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
130 130
131 struct list_head sibling; 131 struct list_head sibling;
132 struct pci_dev *pci_dev;
133 struct notifier_block nb; 132 struct notifier_block nb;
134 acpi_handle handle; 133 acpi_handle handle;
135 134
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd261b4..4dd7114964ac 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
77static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 77static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
78 78
79static struct hotplug_slot_ops acpi_hotplug_slot_ops = { 79static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
80 .owner = THIS_MODULE,
81 .enable_slot = enable_slot, 80 .enable_slot = enable_slot,
82 .disable_slot = disable_slot, 81 .disable_slot = disable_slot,
83 .set_attention_status = set_attention_status, 82 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 803d9ddd6e75..3a6064bce561 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -32,12 +32,11 @@
32 32
33/* 33/*
34 * Lifetime rules for pci_dev: 34 * Lifetime rules for pci_dev:
35 * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
36 * when the driver is loaded or when an insertion event occurs. It loses
37 * a refcount when its ejected or the driver unloads.
38 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot() 35 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
39 * when the bridge is scanned and it loses a refcount when the bridge 36 * when the bridge is scanned and it loses a refcount when the bridge
40 * is removed. 37 * is removed.
38 * - When a P2P bridge is present, we elevate the refcount on the subordinate
39 * bus. It loses the refcount when the the driver unloads.
41 */ 40 */
42 41
43#include <linux/init.h> 42#include <linux/init.h>
@@ -128,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
128 unsigned long long adr, sun; 127 unsigned long long adr, sun;
129 int device, function, retval; 128 int device, function, retval;
130 struct pci_bus *pbus = bridge->pci_bus; 129 struct pci_bus *pbus = bridge->pci_bus;
130 struct pci_dev *pdev;
131 131
132 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) 132 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
133 return AE_OK; 133 return AE_OK;
@@ -211,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
211 newfunc->slot = slot; 211 newfunc->slot = slot;
212 list_add_tail(&newfunc->sibling, &slot->funcs); 212 list_add_tail(&newfunc->sibling, &slot->funcs);
213 213
214 /* associate corresponding pci_dev */ 214 pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
215 newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function)); 215 if (pdev) {
216 if (newfunc->pci_dev) {
217 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); 216 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
217 pci_dev_put(pdev);
218 } 218 }
219 219
220 if (is_dock_device(handle)) { 220 if (is_dock_device(handle)) {
@@ -440,6 +440,12 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
440 goto err; 440 goto err;
441 } 441 }
442 442
443 /*
444 * Grab a ref to the subordinate PCI bus in case the bus is
445 * removed via PCI core logical hotplug. The ref pins the bus
446 * (which we access during module unload).
447 */
448 get_device(&bridge->pci_bus->dev);
443 spin_lock_init(&bridge->res_lock); 449 spin_lock_init(&bridge->res_lock);
444 450
445 init_bridge_misc(bridge); 451 init_bridge_misc(bridge);
@@ -609,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
609 if (ACPI_FAILURE(status)) 615 if (ACPI_FAILURE(status))
610 err("failed to remove notify handler\n"); 616 err("failed to remove notify handler\n");
611 } 617 }
612 pci_dev_put(func->pci_dev);
613 list_del(list); 618 list_del(list);
614 kfree(func); 619 kfree(func);
615 } 620 }
@@ -619,6 +624,12 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
619 slot = next; 624 slot = next;
620 } 625 }
621 626
627 /*
628 * Only P2P bridges have a pci_dev
629 */
630 if (bridge->pci_dev)
631 put_device(&bridge->pci_bus->dev);
632
622 pci_dev_put(bridge->pci_dev); 633 pci_dev_put(bridge->pci_dev);
623 list_del(&bridge->list); 634 list_del(&bridge->list);
624 kfree(bridge); 635 kfree(bridge);
@@ -1087,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot)
1087 pci_enable_bridges(bus); 1098 pci_enable_bridges(bus);
1088 pci_bus_add_devices(bus); 1099 pci_bus_add_devices(bus);
1089 1100
1090 /* associate pci_dev to our representation */
1091 list_for_each (l, &slot->funcs) { 1101 list_for_each (l, &slot->funcs) {
1092 func = list_entry(l, struct acpiphp_func, sibling); 1102 func = list_entry(l, struct acpiphp_func, sibling);
1093 func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 1103 dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
1094 func->function)); 1104 func->function));
1095 if (!func->pci_dev) 1105 if (!dev)
1096 continue; 1106 continue;
1097 1107
1098 if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE && 1108 if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
1099 func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) 1109 dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
1110 pci_dev_put(dev);
1100 continue; 1111 continue;
1112 }
1101 1113
1102 status = find_p2p_bridge(func->handle, (u32)1, bus, NULL); 1114 status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
1103 if (ACPI_FAILURE(status)) 1115 if (ACPI_FAILURE(status))
1104 warn("find_p2p_bridge failed (error code = 0x%x)\n", 1116 warn("find_p2p_bridge failed (error code = 0x%x)\n",
1105 status); 1117 status);
1118 pci_dev_put(dev);
1106 } 1119 }
1107 1120
1108 slot->flags |= SLOT_ENABLED; 1121 slot->flags |= SLOT_ENABLED;
@@ -1128,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus)
1128 */ 1141 */
1129static int disable_device(struct acpiphp_slot *slot) 1142static int disable_device(struct acpiphp_slot *slot)
1130{ 1143{
1131 int retval = 0;
1132 struct acpiphp_func *func; 1144 struct acpiphp_func *func;
1133 struct list_head *l; 1145 struct pci_dev *pdev;
1134 1146
1135 /* is this slot already disabled? */ 1147 /* is this slot already disabled? */
1136 if (!(slot->flags & SLOT_ENABLED)) 1148 if (!(slot->flags & SLOT_ENABLED))
1137 goto err_exit; 1149 goto err_exit;
1138 1150
1139 list_for_each (l, &slot->funcs) { 1151 list_for_each_entry(func, &slot->funcs, sibling) {
1140 func = list_entry(l, struct acpiphp_func, sibling);
1141
1142 if (func->bridge) { 1152 if (func->bridge) {
1143 /* cleanup p2p bridges under this P2P bridge */ 1153 /* cleanup p2p bridges under this P2P bridge */
1144 cleanup_p2p_bridge(func->bridge->handle, 1154 cleanup_p2p_bridge(func->bridge->handle,
@@ -1146,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot)
1146 func->bridge = NULL; 1156 func->bridge = NULL;
1147 } 1157 }
1148 1158
1149 if (func->pci_dev) { 1159 pdev = pci_get_slot(slot->bridge->pci_bus,
1150 pci_stop_bus_device(func->pci_dev); 1160 PCI_DEVFN(slot->device, func->function));
1151 if (func->pci_dev->subordinate) { 1161 if (pdev) {
1152 disable_bridges(func->pci_dev->subordinate); 1162 pci_stop_bus_device(pdev);
1153 pci_disable_device(func->pci_dev); 1163 if (pdev->subordinate) {
1164 disable_bridges(pdev->subordinate);
1165 pci_disable_device(pdev);
1154 } 1166 }
1167 pci_remove_bus_device(pdev);
1168 pci_dev_put(pdev);
1155 } 1169 }
1156 } 1170 }
1157 1171
1158 list_for_each (l, &slot->funcs) { 1172 list_for_each_entry(func, &slot->funcs, sibling) {
1159 func = list_entry(l, struct acpiphp_func, sibling);
1160
1161 acpiphp_unconfigure_ioapics(func->handle); 1173 acpiphp_unconfigure_ioapics(func->handle);
1162 acpiphp_bus_trim(func->handle); 1174 acpiphp_bus_trim(func->handle);
1163 /* try to remove anyway.
1164 * acpiphp_bus_add might have been failed */
1165
1166 if (!func->pci_dev)
1167 continue;
1168
1169 pci_remove_bus_device(func->pci_dev);
1170 pci_dev_put(func->pci_dev);
1171 func->pci_dev = NULL;
1172 } 1175 }
1173 1176
1174 slot->flags &= (~SLOT_ENABLED); 1177 slot->flags &= (~SLOT_ENABLED);
1175 1178
1176 err_exit: 1179err_exit:
1177 return retval; 1180 return 0;
1178} 1181}
1179 1182
1180 1183
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4feef8c..a5b9f6ae507b 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@ static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
72static int get_latch_status(struct hotplug_slot *slot, u8 * value); 72static int get_latch_status(struct hotplug_slot *slot, u8 * value);
73 73
74static struct hotplug_slot_ops cpci_hotplug_slot_ops = { 74static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
75 .owner = THIS_MODULE,
76 .enable_slot = enable_slot, 75 .enable_slot = enable_slot,
77 .disable_slot = disable_slot, 76 .disable_slot = disable_slot,
78 .set_attention_status = set_attention_status, 77 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f69f73e..53836001d511 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@ struct ctrl_reg { /* offset */
150 150
151/* offsets to the controller registers based on the above structure layout */ 151/* offsets to the controller registers based on the above structure layout */
152enum ctrl_offsets { 152enum ctrl_offsets {
153 SLOT_RST = offsetof(struct ctrl_reg, slot_RST), 153 SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
154 SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable), 154 SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
155 MISC = offsetof(struct ctrl_reg, misc), 155 MISC = offsetof(struct ctrl_reg, misc),
156 LED_CONTROL = offsetof(struct ctrl_reg, led_control), 156 LED_CONTROL = offsetof(struct ctrl_reg, led_control),
157 INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear), 157 INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
158 INT_MASK = offsetof(struct ctrl_reg, int_mask), 158 INT_MASK = offsetof(struct ctrl_reg, int_mask),
159 CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0), 159 CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
160 CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1), 160 CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
161 CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1), 161 CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
162 GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB), 162 GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
163 NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input), 163 NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
164 CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3), 164 CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
165 CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4), 165 CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
166 CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5), 166 CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
167 CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6), 167 CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
168 CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7), 168 CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
169 CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8), 169 CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
170 SLOT_MASK = offsetof(struct ctrl_reg, slot_mask), 170 SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
171 CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9), 171 CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
172 CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10), 172 CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
173 CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11), 173 CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
174 SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR), 174 SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@ struct hrt {
190 u32 reserved2; 190 u32 reserved2;
191} __attribute__ ((packed)); 191} __attribute__ ((packed));
192 192
193/* offsets to the hotplug resource table registers based on the above structure layout */ 193/* offsets to the hotplug resource table registers based on the above
194 * structure layout
195 */
194enum hrt_offsets { 196enum hrt_offsets {
195 SIG0 = offsetof(struct hrt, sig0), 197 SIG0 = offsetof(struct hrt, sig0),
196 SIG1 = offsetof(struct hrt, sig1), 198 SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@ struct slot_rt {
217 u16 pre_mem_length; 219 u16 pre_mem_length;
218} __attribute__ ((packed)); 220} __attribute__ ((packed));
219 221
220/* offsets to the hotplug slot resource table registers based on the above structure layout */ 222/* offsets to the hotplug slot resource table registers based on the above
223 * structure layout
224 */
221enum slot_rt_offsets { 225enum slot_rt_offsets {
222 DEV_FUNC = offsetof(struct slot_rt, dev_func), 226 DEV_FUNC = offsetof(struct slot_rt, dev_func),
223 PRIMARY_BUS = offsetof(struct slot_rt, primary_bus), 227 PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
224 SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus), 228 SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
225 MAX_BUS = offsetof(struct slot_rt, max_bus), 229 MAX_BUS = offsetof(struct slot_rt, max_bus),
226 IO_BASE = offsetof(struct slot_rt, io_base), 230 IO_BASE = offsetof(struct slot_rt, io_base),
227 IO_LENGTH = offsetof(struct slot_rt, io_length), 231 IO_LENGTH = offsetof(struct slot_rt, io_length),
228 MEM_BASE = offsetof(struct slot_rt, mem_base), 232 MEM_BASE = offsetof(struct slot_rt, mem_base),
229 MEM_LENGTH = offsetof(struct slot_rt, mem_length), 233 MEM_LENGTH = offsetof(struct slot_rt, mem_length),
230 PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base), 234 PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
231 PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length), 235 PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
232}; 236};
233 237
234struct pci_func { 238struct pci_func {
@@ -286,8 +290,8 @@ struct event_info {
286struct controller { 290struct controller {
287 struct controller *next; 291 struct controller *next;
288 u32 ctrl_int_comp; 292 u32 ctrl_int_comp;
289 struct mutex crit_sect; /* critical section mutex */ 293 struct mutex crit_sect; /* critical section mutex */
290 void __iomem *hpc_reg; /* cookie for our pci controller location */ 294 void __iomem *hpc_reg; /* cookie for our pci controller location */
291 struct pci_resource *mem_head; 295 struct pci_resource *mem_head;
292 struct pci_resource *p_mem_head; 296 struct pci_resource *p_mem_head;
293 struct pci_resource *io_head; 297 struct pci_resource *io_head;
@@ -299,7 +303,7 @@ struct controller {
299 u8 next_event; 303 u8 next_event;
300 u8 interrupt; 304 u8 interrupt;
301 u8 cfgspc_irq; 305 u8 cfgspc_irq;
302 u8 bus; /* bus number for the pci hotplug controller */ 306 u8 bus; /* bus number for the pci hotplug controller */
303 u8 rev; 307 u8 rev;
304 u8 slot_device_offset; 308 u8 slot_device_offset;
305 u8 first_slot; 309 u8 first_slot;
@@ -401,46 +405,57 @@ struct resource_lists {
401 405
402 406
403/* debugfs functions for the hotplug controller info */ 407/* debugfs functions for the hotplug controller info */
404extern void cpqhp_initialize_debugfs (void); 408extern void cpqhp_initialize_debugfs(void);
405extern void cpqhp_shutdown_debugfs (void); 409extern void cpqhp_shutdown_debugfs(void);
406extern void cpqhp_create_debugfs_files (struct controller *ctrl); 410extern void cpqhp_create_debugfs_files(struct controller *ctrl);
407extern void cpqhp_remove_debugfs_files (struct controller *ctrl); 411extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
408 412
409/* controller functions */ 413/* controller functions */
410extern void cpqhp_pushbutton_thread (unsigned long event_pointer); 414extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
411extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data); 415extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
412extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start); 416extern int cpqhp_find_available_resources(struct controller *ctrl,
413extern int cpqhp_event_start_thread (void); 417 void __iomem *rom_start);
414extern void cpqhp_event_stop_thread (void); 418extern int cpqhp_event_start_thread(void);
415extern struct pci_func *cpqhp_slot_create (unsigned char busnumber); 419extern void cpqhp_event_stop_thread(void);
416extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index); 420extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
417extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func); 421extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
418extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func); 422 unsigned char index);
419extern int cpqhp_hardware_test (struct controller *ctrl, int test_num); 423extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
424extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
425extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
420 426
421/* resource functions */ 427/* resource functions */
422extern int cpqhp_resource_sort_and_combine (struct pci_resource **head); 428extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
423 429
424/* pci functions */ 430/* pci functions */
425extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num); 431extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
426extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot); 432extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
427extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug); 433 u8 slot);
428extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func); 434extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
429extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func); 435 int is_hot_plug);
430extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func); 436extern int cpqhp_save_base_addr_length(struct controller *ctrl,
431extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot); 437 struct pci_func *func);
432extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func); 438extern int cpqhp_save_used_resources(struct controller *ctrl,
433extern void cpqhp_destroy_board_resources (struct pci_func * func); 439 struct pci_func *func);
434extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources); 440extern int cpqhp_configure_board(struct controller *ctrl,
435extern void cpqhp_destroy_resource_list (struct resource_lists * resources); 441 struct pci_func *func);
436extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func); 442extern int cpqhp_save_slot_config(struct controller *ctrl,
437extern int cpqhp_unconfigure_device (struct pci_func* func); 443 struct pci_func *new_slot);
444extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
445extern void cpqhp_destroy_board_resources(struct pci_func *func);
446extern int cpqhp_return_board_resources (struct pci_func *func,
447 struct resource_lists *resources);
448extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
449extern int cpqhp_configure_device(struct controller *ctrl,
450 struct pci_func *func);
451extern int cpqhp_unconfigure_device(struct pci_func *func);
438 452
439/* Global variables */ 453/* Global variables */
440extern int cpqhp_debug; 454extern int cpqhp_debug;
441extern int cpqhp_legacy_mode; 455extern int cpqhp_legacy_mode;
442extern struct controller *cpqhp_ctrl_list; 456extern struct controller *cpqhp_ctrl_list;
443extern struct pci_func *cpqhp_slot_list[256]; 457extern struct pci_func *cpqhp_slot_list[256];
458extern struct irq_routing_table *cpqhp_routing_table;
444 459
445/* these can be gotten rid of, but for debugging they are purty */ 460/* these can be gotten rid of, but for debugging they are purty */
446extern u8 cpqhp_nic_irq; 461extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@ extern u8 cpqhp_disk_irq;
449 464
450/* inline functions */ 465/* inline functions */
451 466
452static inline char *slot_name(struct slot *slot) 467static inline const char *slot_name(struct slot *slot)
453{ 468{
454 return hotplug_slot_name(slot->hotplug_slot); 469 return hotplug_slot_name(slot->hotplug_slot);
455} 470}
@@ -458,9 +473,9 @@ static inline char *slot_name(struct slot *slot)
458 * return_resource 473 * return_resource
459 * 474 *
460 * Puts node back in the resource list pointed to by head 475 * Puts node back in the resource list pointed to by head
461 *
462 */ 476 */
463static inline void return_resource(struct pci_resource **head, struct pci_resource *node) 477static inline void return_resource(struct pci_resource **head,
478 struct pci_resource *node)
464{ 479{
465 if (!node || !head) 480 if (!node || !head)
466 return; 481 return;
@@ -471,7 +486,7 @@ static inline void return_resource(struct pci_resource **head, struct pci_resour
471static inline void set_SOGO(struct controller *ctrl) 486static inline void set_SOGO(struct controller *ctrl)
472{ 487{
473 u16 misc; 488 u16 misc;
474 489
475 misc = readw(ctrl->hpc_reg + MISC); 490 misc = readw(ctrl->hpc_reg + MISC);
476 misc = (misc | 0x0001) & 0xFFFB; 491 misc = (misc | 0x0001) & 0xFFFB;
477 writew(misc, ctrl->hpc_reg + MISC); 492 writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@ static inline void set_SOGO(struct controller *ctrl)
481static inline void amber_LED_on(struct controller *ctrl, u8 slot) 496static inline void amber_LED_on(struct controller *ctrl, u8 slot)
482{ 497{
483 u32 led_control; 498 u32 led_control;
484 499
485 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 500 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
486 led_control |= (0x01010000L << slot); 501 led_control |= (0x01010000L << slot);
487 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 502 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@ static inline void amber_LED_on(struct controller *ctrl, u8 slot)
491static inline void amber_LED_off(struct controller *ctrl, u8 slot) 506static inline void amber_LED_off(struct controller *ctrl, u8 slot)
492{ 507{
493 u32 led_control; 508 u32 led_control;
494 509
495 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 510 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
496 led_control &= ~(0x01010000L << slot); 511 led_control &= ~(0x01010000L << slot);
497 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 512 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
504 519
505 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 520 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
506 led_control &= (0x01010000L << slot); 521 led_control &= (0x01010000L << slot);
507 522
508 return led_control ? 1 : 0; 523 return led_control ? 1 : 0;
509} 524}
510 525
@@ -512,7 +527,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
512static inline void green_LED_on(struct controller *ctrl, u8 slot) 527static inline void green_LED_on(struct controller *ctrl, u8 slot)
513{ 528{
514 u32 led_control; 529 u32 led_control;
515 530
516 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 531 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
517 led_control |= 0x0101L << slot; 532 led_control |= 0x0101L << slot;
518 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 533 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@ static inline void green_LED_on(struct controller *ctrl, u8 slot)
521static inline void green_LED_off(struct controller *ctrl, u8 slot) 536static inline void green_LED_off(struct controller *ctrl, u8 slot)
522{ 537{
523 u32 led_control; 538 u32 led_control;
524 539
525 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 540 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
526 led_control &= ~(0x0101L << slot); 541 led_control &= ~(0x0101L << slot);
527 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 542 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@ static inline void green_LED_off(struct controller *ctrl, u8 slot)
531static inline void green_LED_blink(struct controller *ctrl, u8 slot) 546static inline void green_LED_blink(struct controller *ctrl, u8 slot)
532{ 547{
533 u32 led_control; 548 u32 led_control;
534 549
535 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 550 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
536 led_control &= ~(0x0101L << slot); 551 led_control &= ~(0x0101L << slot);
537 led_control |= (0x0001L << slot); 552 led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@ static inline u8 read_slot_enable(struct controller *ctrl)
575} 590}
576 591
577 592
578/* 593/**
579 * get_controller_speed - find the current frequency/mode of controller. 594 * get_controller_speed - find the current frequency/mode of controller.
580 * 595 *
581 * @ctrl: controller to get frequency/mode for. 596 * @ctrl: controller to get frequency/mode for.
582 * 597 *
583 * Returns controller speed. 598 * Returns controller speed.
584 *
585 */ 599 */
586static inline u8 get_controller_speed(struct controller *ctrl) 600static inline u8 get_controller_speed(struct controller *ctrl)
587{ 601{
588 u8 curr_freq; 602 u8 curr_freq;
589 u16 misc; 603 u16 misc;
590 604
591 if (ctrl->pcix_support) { 605 if (ctrl->pcix_support) {
592 curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ); 606 curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
593 if ((curr_freq & 0xB0) == 0xB0) 607 if ((curr_freq & 0xB0) == 0xB0)
594 return PCI_SPEED_133MHz_PCIX; 608 return PCI_SPEED_133MHz_PCIX;
595 if ((curr_freq & 0xA0) == 0xA0) 609 if ((curr_freq & 0xA0) == 0xA0)
596 return PCI_SPEED_100MHz_PCIX; 610 return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@ static inline u8 get_controller_speed(struct controller *ctrl)
602 return PCI_SPEED_33MHz; 616 return PCI_SPEED_33MHz;
603 } 617 }
604 618
605 misc = readw(ctrl->hpc_reg + MISC); 619 misc = readw(ctrl->hpc_reg + MISC);
606 return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz; 620 return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
607} 621}
608
609 622
610/* 623
624/**
611 * get_adapter_speed - find the max supported frequency/mode of adapter. 625 * get_adapter_speed - find the max supported frequency/mode of adapter.
612 * 626 *
613 * @ctrl: hotplug controller. 627 * @ctrl: hotplug controller.
614 * @hp_slot: hotplug slot where adapter is installed. 628 * @hp_slot: hotplug slot where adapter is installed.
615 * 629 *
616 * Returns adapter speed. 630 * Returns adapter speed.
617 *
618 */ 631 */
619static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot) 632static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
620{ 633{
@@ -672,7 +685,8 @@ static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
672} 685}
673 686
674 687
675static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot) 688static inline int cpq_get_latch_status(struct controller *ctrl,
689 struct slot *slot)
676{ 690{
677 u32 status; 691 u32 status;
678 u8 hp_slot; 692 u8 hp_slot;
@@ -687,7 +701,8 @@ static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slo
687} 701}
688 702
689 703
690static inline int get_presence_status(struct controller *ctrl, struct slot *slot) 704static inline int get_presence_status(struct controller *ctrl,
705 struct slot *slot)
691{ 706{
692 int presence_save = 0; 707 int presence_save = 0;
693 u8 hp_slot; 708 u8 hp_slot;
@@ -696,7 +711,8 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
696 hp_slot = slot->device - ctrl->slot_device_offset; 711 hp_slot = slot->device - ctrl->slot_device_offset;
697 712
698 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 713 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
699 presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02; 714 presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
715 >> hp_slot) & 0x02;
700 716
701 return presence_save; 717 return presence_save;
702} 718}
@@ -718,5 +734,12 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
718 return retval; 734 return retval;
719} 735}
720 736
721#endif 737#include <asm/pci_x86.h>
738static inline int cpqhp_routing_table_length(void)
739{
740 BUG_ON(cpqhp_routing_table == NULL);
741 return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
742 sizeof(struct irq_info));
743}
722 744
745#endif
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcbb28a7..075b4f4b6e0d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
25 * Send feedback to <greg@kroah.com> 25 * Send feedback to <greg@kroah.com>
26 * 26 *
27 * Jan 12, 2003 - Added 66/100/133MHz PCI-X support, 27 * Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
28 * Torben Mathiasen <torben.mathiasen@hp.com> 28 * Torben Mathiasen <torben.mathiasen@hp.com>
29 *
30 */ 29 */
31 30
32#include <linux/module.h> 31#include <linux/module.h>
@@ -45,7 +44,6 @@
45 44
46#include "cpqphp.h" 45#include "cpqphp.h"
47#include "cpqphp_nvram.h" 46#include "cpqphp_nvram.h"
48#include <asm/pci_x86.h>
49 47
50 48
51/* Global variables */ 49/* Global variables */
@@ -53,6 +51,7 @@ int cpqhp_debug;
53int cpqhp_legacy_mode; 51int cpqhp_legacy_mode;
54struct controller *cpqhp_ctrl_list; /* = NULL */ 52struct controller *cpqhp_ctrl_list; /* = NULL */
55struct pci_func *cpqhp_slot_list[256]; 53struct pci_func *cpqhp_slot_list[256];
54struct irq_routing_table *cpqhp_routing_table;
56 55
57/* local variables */ 56/* local variables */
58static void __iomem *smbios_table; 57static void __iomem *smbios_table;
@@ -78,33 +77,6 @@ MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
78 77
79#define CPQHPC_MODULE_MINOR 208 78#define CPQHPC_MODULE_MINOR 208
80 79
81static int one_time_init (void);
82static int set_attention_status (struct hotplug_slot *slot, u8 value);
83static int process_SI (struct hotplug_slot *slot);
84static int process_SS (struct hotplug_slot *slot);
85static int hardware_test (struct hotplug_slot *slot, u32 value);
86static int get_power_status (struct hotplug_slot *slot, u8 *value);
87static int get_attention_status (struct hotplug_slot *slot, u8 *value);
88static int get_latch_status (struct hotplug_slot *slot, u8 *value);
89static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
90static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
91static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
92
93static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
94 .owner = THIS_MODULE,
95 .set_attention_status = set_attention_status,
96 .enable_slot = process_SI,
97 .disable_slot = process_SS,
98 .hardware_test = hardware_test,
99 .get_power_status = get_power_status,
100 .get_attention_status = get_attention_status,
101 .get_latch_status = get_latch_status,
102 .get_adapter_status = get_adapter_status,
103 .get_max_bus_speed = get_max_bus_speed,
104 .get_cur_bus_speed = get_cur_bus_speed,
105};
106
107
108static inline int is_slot64bit(struct slot *slot) 80static inline int is_slot64bit(struct slot *slot)
109{ 81{
110 return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0; 82 return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
144 break; 116 break;
145 } 117 }
146 } 118 }
147 119
148 if (!status) 120 if (!status)
149 fp = NULL; 121 fp = NULL;
150 122
@@ -171,7 +143,7 @@ static int init_SERR(struct controller * ctrl)
171 tempdword = ctrl->first_slot; 143 tempdword = ctrl->first_slot;
172 144
173 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; 145 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
174 // Loop through slots 146 /* Loop through slots */
175 while (number_of_slots) { 147 while (number_of_slots) {
176 physical_slot = tempdword; 148 physical_slot = tempdword;
177 writeb(0, ctrl->hpc_reg + SLOT_SERR); 149 writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@ static int init_SERR(struct controller * ctrl)
182 return 0; 154 return 0;
183} 155}
184 156
185 157static int init_cpqhp_routing_table(void)
186/* nice debugging output */
187static int pci_print_IRQ_route (void)
188{ 158{
189 struct irq_routing_table *routing_table;
190 int len; 159 int len;
191 int loop;
192
193 u8 tbus, tdevice, tslot;
194 160
195 routing_table = pcibios_get_irq_routing_table(); 161 cpqhp_routing_table = pcibios_get_irq_routing_table();
196 if (routing_table == NULL) { 162 if (cpqhp_routing_table == NULL)
197 err("No BIOS Routing Table??? Not good\n");
198 return -ENOMEM; 163 return -ENOMEM;
199 }
200 164
201 len = (routing_table->size - sizeof(struct irq_routing_table)) / 165 len = cpqhp_routing_table_length();
202 sizeof(struct irq_info);
203 // Make sure I got at least one entry
204 if (len == 0) { 166 if (len == 0) {
205 kfree(routing_table); 167 kfree(cpqhp_routing_table);
168 cpqhp_routing_table = NULL;
206 return -1; 169 return -1;
207 } 170 }
208 171
209 dbg("bus dev func slot\n"); 172 return 0;
173}
174
175/* nice debugging output */
176static void pci_print_IRQ_route(void)
177{
178 int len;
179 int loop;
180 u8 tbus, tdevice, tslot;
181
182 len = cpqhp_routing_table_length();
210 183
184 dbg("bus dev func slot\n");
211 for (loop = 0; loop < len; ++loop) { 185 for (loop = 0; loop < len; ++loop) {
212 tbus = routing_table->slots[loop].bus; 186 tbus = cpqhp_routing_table->slots[loop].bus;
213 tdevice = routing_table->slots[loop].devfn; 187 tdevice = cpqhp_routing_table->slots[loop].devfn;
214 tslot = routing_table->slots[loop].slot; 188 tslot = cpqhp_routing_table->slots[loop].slot;
215 dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot); 189 dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
216 190
217 } 191 }
218 kfree(routing_table); 192 return;
219 return 0;
220} 193}
221 194
222 195
@@ -242,9 +215,9 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
242 void __iomem *p_max; 215 void __iomem *p_max;
243 216
244 if (!smbios_table || !curr) 217 if (!smbios_table || !curr)
245 return(NULL); 218 return NULL;
246 219
247 // set p_max to the end of the table 220 /* set p_max to the end of the table */
248 p_max = smbios_start + readw(smbios_table + ST_LENGTH); 221 p_max = smbios_start + readw(smbios_table + ST_LENGTH);
249 222
250 p_temp = curr; 223 p_temp = curr;
@@ -253,20 +226,19 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
253 while ((p_temp < p_max) && !bail) { 226 while ((p_temp < p_max) && !bail) {
254 /* Look for the double NULL terminator 227 /* Look for the double NULL terminator
255 * The first condition is the previous byte 228 * The first condition is the previous byte
256 * and the second is the curr */ 229 * and the second is the curr
257 if (!previous_byte && !(readb(p_temp))) { 230 */
231 if (!previous_byte && !(readb(p_temp)))
258 bail = 1; 232 bail = 1;
259 }
260 233
261 previous_byte = readb(p_temp); 234 previous_byte = readb(p_temp);
262 p_temp++; 235 p_temp++;
263 } 236 }
264 237
265 if (p_temp < p_max) { 238 if (p_temp < p_max)
266 return p_temp; 239 return p_temp;
267 } else { 240 else
268 return NULL; 241 return NULL;
269 }
270} 242}
271 243
272 244
@@ -292,21 +264,18 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
292 if (!smbios_table) 264 if (!smbios_table)
293 return NULL; 265 return NULL;
294 266
295 if (!previous) { 267 if (!previous)
296 previous = smbios_start; 268 previous = smbios_start;
297 } else { 269 else
298 previous = get_subsequent_smbios_entry(smbios_start, 270 previous = get_subsequent_smbios_entry(smbios_start,
299 smbios_table, previous); 271 smbios_table, previous);
300 }
301 272
302 while (previous) { 273 while (previous)
303 if (readb(previous + SMBIOS_GENERIC_TYPE) != type) { 274 if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
304 previous = get_subsequent_smbios_entry(smbios_start, 275 previous = get_subsequent_smbios_entry(smbios_start,
305 smbios_table, previous); 276 smbios_table, previous);
306 } else { 277 else
307 break; 278 break;
308 }
309 }
310 279
311 return previous; 280 return previous;
312} 281}
@@ -322,144 +291,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
322 kfree(slot); 291 kfree(slot);
323} 292}
324 293
325#define SLOT_NAME_SIZE 10
326
327static int ctrl_slot_setup(struct controller *ctrl,
328 void __iomem *smbios_start,
329 void __iomem *smbios_table)
330{
331 struct slot *slot;
332 struct hotplug_slot *hotplug_slot;
333 struct hotplug_slot_info *hotplug_slot_info;
334 u8 number_of_slots;
335 u8 slot_device;
336 u8 slot_number;
337 u8 ctrl_slot;
338 u32 tempdword;
339 char name[SLOT_NAME_SIZE];
340 void __iomem *slot_entry= NULL;
341 int result = -ENOMEM;
342
343 dbg("%s\n", __func__);
344
345 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
346
347 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
348 slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
349 slot_number = ctrl->first_slot;
350
351 while (number_of_slots) {
352 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
353 if (!slot)
354 goto error;
355
356 slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
357 GFP_KERNEL);
358 if (!slot->hotplug_slot)
359 goto error_slot;
360 hotplug_slot = slot->hotplug_slot;
361
362 hotplug_slot->info =
363 kzalloc(sizeof(*(hotplug_slot->info)),
364 GFP_KERNEL);
365 if (!hotplug_slot->info)
366 goto error_hpslot;
367 hotplug_slot_info = hotplug_slot->info;
368
369 slot->ctrl = ctrl;
370 slot->bus = ctrl->bus;
371 slot->device = slot_device;
372 slot->number = slot_number;
373 dbg("slot->number = %u\n", slot->number);
374
375 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
376 slot_entry);
377
378 while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
379 slot->number)) {
380 slot_entry = get_SMBIOS_entry(smbios_start,
381 smbios_table, 9, slot_entry);
382 }
383
384 slot->p_sm_slot = slot_entry;
385
386 init_timer(&slot->task_event);
387 slot->task_event.expires = jiffies + 5 * HZ;
388 slot->task_event.function = cpqhp_pushbutton_thread;
389
390 //FIXME: these capabilities aren't used but if they are
391 // they need to be correctly implemented
392 slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
393 slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
394
395 if (is_slot64bit(slot))
396 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
397 if (is_slot66mhz(slot))
398 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
399 if (ctrl->speed == PCI_SPEED_66MHz)
400 slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
401
402 ctrl_slot =
403 slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
404
405 // Check presence
406 slot->capabilities |=
407 ((((~tempdword) >> 23) |
408 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
409 // Check the switch state
410 slot->capabilities |=
411 ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
412 // Check the slot enable
413 slot->capabilities |=
414 ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
415
416 /* register this slot with the hotplug pci core */
417 hotplug_slot->release = &release_slot;
418 hotplug_slot->private = slot;
419 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
420 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
421
422 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
423 hotplug_slot_info->attention_status =
424 cpq_get_attention_status(ctrl, slot);
425 hotplug_slot_info->latch_status =
426 cpq_get_latch_status(ctrl, slot);
427 hotplug_slot_info->adapter_status =
428 get_presence_status(ctrl, slot);
429
430 dbg("registering bus %d, dev %d, number %d, "
431 "ctrl->slot_device_offset %d, slot %d\n",
432 slot->bus, slot->device,
433 slot->number, ctrl->slot_device_offset,
434 slot_number);
435 result = pci_hp_register(hotplug_slot,
436 ctrl->pci_dev->bus,
437 slot->device,
438 name);
439 if (result) {
440 err("pci_hp_register failed with error %d\n", result);
441 goto error_info;
442 }
443
444 slot->next = ctrl->slot;
445 ctrl->slot = slot;
446
447 number_of_slots--;
448 slot_device++;
449 slot_number++;
450 }
451
452 return 0;
453error_info:
454 kfree(hotplug_slot_info);
455error_hpslot:
456 kfree(hotplug_slot);
457error_slot:
458 kfree(slot);
459error:
460 return result;
461}
462
463static int ctrl_slot_cleanup (struct controller * ctrl) 294static int ctrl_slot_cleanup (struct controller * ctrl)
464{ 295{
465 struct slot *old_slot, *next_slot; 296 struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
476 307
477 cpqhp_remove_debugfs_files(ctrl); 308 cpqhp_remove_debugfs_files(ctrl);
478 309
479 //Free IRQ associated with hot plug device 310 /* Free IRQ associated with hot plug device */
480 free_irq(ctrl->interrupt, ctrl); 311 free_irq(ctrl->interrupt, ctrl);
481 //Unmap the memory 312 /* Unmap the memory */
482 iounmap(ctrl->hpc_reg); 313 iounmap(ctrl->hpc_reg);
483 //Finally reclaim PCI mem 314 /* Finally reclaim PCI mem */
484 release_mem_region(pci_resource_start(ctrl->pci_dev, 0), 315 release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
485 pci_resource_len(ctrl->pci_dev, 0)); 316 pci_resource_len(ctrl->pci_dev, 0));
486 317
487 return(0); 318 return 0;
488} 319}
489 320
490 321
491//============================================================================ 322/**
492// function: get_slot_mapping 323 * get_slot_mapping - determine logical slot mapping for PCI device
493// 324 *
494// Description: Attempts to determine a logical slot mapping for a PCI 325 * Won't work for more than one PCI-PCI bridge in a slot.
495// device. Won't work for more than one PCI-PCI bridge 326 *
496// in a slot. 327 * @bus_num - bus number of PCI device
497// 328 * @dev_num - device number of PCI device
498// Input: u8 bus_num - bus number of PCI device 329 * @slot - Pointer to u8 where slot number will be returned
499// u8 dev_num - device number of PCI device 330 *
500// u8 *slot - Pointer to u8 where slot number will 331 * Output: SUCCESS or FAILURE
501// be returned 332 */
502//
503// Output: SUCCESS or FAILURE
504//=============================================================================
505static int 333static int
506get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot) 334get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
507{ 335{
508 struct irq_routing_table *PCIIRQRoutingInfoLength;
509 u32 work; 336 u32 work;
510 long len; 337 long len;
511 long loop; 338 long loop;
@@ -516,36 +343,25 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
516 343
517 bridgeSlot = 0xFF; 344 bridgeSlot = 0xFF;
518 345
519 PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table(); 346 len = cpqhp_routing_table_length();
520 if (!PCIIRQRoutingInfoLength)
521 return -1;
522
523 len = (PCIIRQRoutingInfoLength->size -
524 sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
525 // Make sure I got at least one entry
526 if (len == 0) {
527 kfree(PCIIRQRoutingInfoLength);
528 return -1;
529 }
530
531 for (loop = 0; loop < len; ++loop) { 347 for (loop = 0; loop < len; ++loop) {
532 tbus = PCIIRQRoutingInfoLength->slots[loop].bus; 348 tbus = cpqhp_routing_table->slots[loop].bus;
533 tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3; 349 tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
534 tslot = PCIIRQRoutingInfoLength->slots[loop].slot; 350 tslot = cpqhp_routing_table->slots[loop].slot;
535 351
536 if ((tbus == bus_num) && (tdevice == dev_num)) { 352 if ((tbus == bus_num) && (tdevice == dev_num)) {
537 *slot = tslot; 353 *slot = tslot;
538 kfree(PCIIRQRoutingInfoLength);
539 return 0; 354 return 0;
540 } else { 355 } else {
541 /* Did not get a match on the target PCI device. Check 356 /* Did not get a match on the target PCI device. Check
542 * if the current IRQ table entry is a PCI-to-PCI bridge 357 * if the current IRQ table entry is a PCI-to-PCI
543 * device. If so, and it's secondary bus matches the 358 * bridge device. If so, and it's secondary bus
544 * bus number for the target device, I need to save the 359 * matches the bus number for the target device, I need
545 * bridge's slot number. If I can not find an entry for 360 * to save the bridge's slot number. If I can not find
546 * the target device, I will have to assume it's on the 361 * an entry for the target device, I will have to
547 * other side of the bridge, and assign it the bridge's 362 * assume it's on the other side of the bridge, and
548 * slot. */ 363 * assign it the bridge's slot.
364 */
549 bus->number = tbus; 365 bus->number = tbus;
550 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0), 366 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
551 PCI_CLASS_REVISION, &work); 367 PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
555 PCI_DEVFN(tdevice, 0), 371 PCI_DEVFN(tdevice, 0),
556 PCI_PRIMARY_BUS, &work); 372 PCI_PRIMARY_BUS, &work);
557 // See if bridge's secondary bus matches target bus. 373 // See if bridge's secondary bus matches target bus.
558 if (((work >> 8) & 0x000000FF) == (long) bus_num) { 374 if (((work >> 8) & 0x000000FF) == (long) bus_num)
559 bridgeSlot = tslot; 375 bridgeSlot = tslot;
560 }
561 } 376 }
562 } 377 }
563 378
564 } 379 }
565 380
566 // If we got here, we didn't find an entry in the IRQ mapping table 381 /* If we got here, we didn't find an entry in the IRQ mapping table for
567 // for the target PCI device. If we did determine that the target 382 * the target PCI device. If we did determine that the target device
568 // device is on the other side of a PCI-to-PCI bridge, return the 383 * is on the other side of a PCI-to-PCI bridge, return the slot number
569 // slot number for the bridge. 384 * for the bridge.
385 */
570 if (bridgeSlot != 0xFF) { 386 if (bridgeSlot != 0xFF) {
571 *slot = bridgeSlot; 387 *slot = bridgeSlot;
572 kfree(PCIIRQRoutingInfoLength);
573 return 0; 388 return 0;
574 } 389 }
575 kfree(PCIIRQRoutingInfoLength); 390 /* Couldn't find an entry in the routing table for this PCI device */
576 // Couldn't find an entry in the routing table for this PCI device
577 return -1; 391 return -1;
578} 392}
579 393
@@ -591,32 +405,32 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
591 u8 hp_slot; 405 u8 hp_slot;
592 406
593 if (func == NULL) 407 if (func == NULL)
594 return(1); 408 return 1;
595 409
596 hp_slot = func->device - ctrl->slot_device_offset; 410 hp_slot = func->device - ctrl->slot_device_offset;
597 411
598 // Wait for exclusive access to hardware 412 /* Wait for exclusive access to hardware */
599 mutex_lock(&ctrl->crit_sect); 413 mutex_lock(&ctrl->crit_sect);
600 414
601 if (status == 1) { 415 if (status == 1)
602 amber_LED_on (ctrl, hp_slot); 416 amber_LED_on (ctrl, hp_slot);
603 } else if (status == 0) { 417 else if (status == 0)
604 amber_LED_off (ctrl, hp_slot); 418 amber_LED_off (ctrl, hp_slot);
605 } else { 419 else {
606 // Done with exclusive hardware access 420 /* Done with exclusive hardware access */
607 mutex_unlock(&ctrl->crit_sect); 421 mutex_unlock(&ctrl->crit_sect);
608 return(1); 422 return 1;
609 } 423 }
610 424
611 set_SOGO(ctrl); 425 set_SOGO(ctrl);
612 426
613 // Wait for SOBS to be unset 427 /* Wait for SOBS to be unset */
614 wait_for_ctrl_irq (ctrl); 428 wait_for_ctrl_irq (ctrl);
615 429
616 // Done with exclusive hardware access 430 /* Done with exclusive hardware access */
617 mutex_unlock(&ctrl->crit_sect); 431 mutex_unlock(&ctrl->crit_sect);
618 432
619 return(0); 433 return 0;
620} 434}
621 435
622 436
@@ -719,7 +533,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
719 533
720 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 534 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
721 535
722 return cpqhp_hardware_test(ctrl, value); 536 return cpqhp_hardware_test(ctrl, value);
723} 537}
724 538
725 539
@@ -738,7 +552,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
738{ 552{
739 struct slot *slot = hotplug_slot->private; 553 struct slot *slot = hotplug_slot->private;
740 struct controller *ctrl = slot->ctrl; 554 struct controller *ctrl = slot->ctrl;
741 555
742 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 556 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
743 557
744 *value = cpq_get_attention_status(ctrl, slot); 558 *value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
793 return 0; 607 return 0;
794} 608}
795 609
610static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
611 .set_attention_status = set_attention_status,
612 .enable_slot = process_SI,
613 .disable_slot = process_SS,
614 .hardware_test = hardware_test,
615 .get_power_status = get_power_status,
616 .get_attention_status = get_attention_status,
617 .get_latch_status = get_latch_status,
618 .get_adapter_status = get_adapter_status,
619 .get_max_bus_speed = get_max_bus_speed,
620 .get_cur_bus_speed = get_cur_bus_speed,
621};
622
623#define SLOT_NAME_SIZE 10
624
625static int ctrl_slot_setup(struct controller *ctrl,
626 void __iomem *smbios_start,
627 void __iomem *smbios_table)
628{
629 struct slot *slot;
630 struct hotplug_slot *hotplug_slot;
631 struct hotplug_slot_info *hotplug_slot_info;
632 u8 number_of_slots;
633 u8 slot_device;
634 u8 slot_number;
635 u8 ctrl_slot;
636 u32 tempdword;
637 char name[SLOT_NAME_SIZE];
638 void __iomem *slot_entry= NULL;
639 int result = -ENOMEM;
640
641 dbg("%s\n", __func__);
642
643 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
644
645 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
646 slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
647 slot_number = ctrl->first_slot;
648
649 while (number_of_slots) {
650 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
651 if (!slot)
652 goto error;
653
654 slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
655 GFP_KERNEL);
656 if (!slot->hotplug_slot)
657 goto error_slot;
658 hotplug_slot = slot->hotplug_slot;
659
660 hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
661 GFP_KERNEL);
662 if (!hotplug_slot->info)
663 goto error_hpslot;
664 hotplug_slot_info = hotplug_slot->info;
665
666 slot->ctrl = ctrl;
667 slot->bus = ctrl->bus;
668 slot->device = slot_device;
669 slot->number = slot_number;
670 dbg("slot->number = %u\n", slot->number);
671
672 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
673 slot_entry);
674
675 while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
676 slot->number)) {
677 slot_entry = get_SMBIOS_entry(smbios_start,
678 smbios_table, 9, slot_entry);
679 }
680
681 slot->p_sm_slot = slot_entry;
682
683 init_timer(&slot->task_event);
684 slot->task_event.expires = jiffies + 5 * HZ;
685 slot->task_event.function = cpqhp_pushbutton_thread;
686
687 /*FIXME: these capabilities aren't used but if they are
688 * they need to be correctly implemented
689 */
690 slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
691 slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
692
693 if (is_slot64bit(slot))
694 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
695 if (is_slot66mhz(slot))
696 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
697 if (ctrl->speed == PCI_SPEED_66MHz)
698 slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
699
700 ctrl_slot =
701 slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
702
703 /* Check presence */
704 slot->capabilities |=
705 ((((~tempdword) >> 23) |
706 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
707 /* Check the switch state */
708 slot->capabilities |=
709 ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
710 /* Check the slot enable */
711 slot->capabilities |=
712 ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
713
714 /* register this slot with the hotplug pci core */
715 hotplug_slot->release = &release_slot;
716 hotplug_slot->private = slot;
717 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
718 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
719
720 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
721 hotplug_slot_info->attention_status =
722 cpq_get_attention_status(ctrl, slot);
723 hotplug_slot_info->latch_status =
724 cpq_get_latch_status(ctrl, slot);
725 hotplug_slot_info->adapter_status =
726 get_presence_status(ctrl, slot);
727
728 dbg("registering bus %d, dev %d, number %d, "
729 "ctrl->slot_device_offset %d, slot %d\n",
730 slot->bus, slot->device,
731 slot->number, ctrl->slot_device_offset,
732 slot_number);
733 result = pci_hp_register(hotplug_slot,
734 ctrl->pci_dev->bus,
735 slot->device,
736 name);
737 if (result) {
738 err("pci_hp_register failed with error %d\n", result);
739 goto error_info;
740 }
741
742 slot->next = ctrl->slot;
743 ctrl->slot = slot;
744
745 number_of_slots--;
746 slot_device++;
747 slot_number++;
748 }
749
750 return 0;
751error_info:
752 kfree(hotplug_slot_info);
753error_hpslot:
754 kfree(hotplug_slot);
755error_slot:
756 kfree(slot);
757error:
758 return result;
759}
760
761static int one_time_init(void)
762{
763 int loop;
764 int retval = 0;
765
766 if (initialized)
767 return 0;
768
769 power_mode = 0;
770
771 retval = init_cpqhp_routing_table();
772 if (retval)
773 goto error;
774
775 if (cpqhp_debug)
776 pci_print_IRQ_route();
777
778 dbg("Initialize + Start the notification mechanism \n");
779
780 retval = cpqhp_event_start_thread();
781 if (retval)
782 goto error;
783
784 dbg("Initialize slot lists\n");
785 for (loop = 0; loop < 256; loop++)
786 cpqhp_slot_list[loop] = NULL;
787
788 /* FIXME: We also need to hook the NMI handler eventually.
789 * this also needs to be worked with Christoph
790 * register_NMI_handler();
791 */
792 /* Map rom address */
793 cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
794 if (!cpqhp_rom_start) {
795 err ("Could not ioremap memory region for ROM\n");
796 retval = -EIO;
797 goto error;
798 }
799
800 /* Now, map the int15 entry point if we are on compaq specific
801 * hardware
802 */
803 compaq_nvram_init(cpqhp_rom_start);
804
805 /* Map smbios table entry point structure */
806 smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
807 cpqhp_rom_start + ROM_PHY_LEN);
808 if (!smbios_table) {
809 err ("Could not find the SMBIOS pointer in memory\n");
810 retval = -EIO;
811 goto error_rom_start;
812 }
813
814 smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
815 readw(smbios_table + ST_LENGTH));
816 if (!smbios_start) {
817 err ("Could not ioremap memory region taken from SMBIOS values\n");
818 retval = -EIO;
819 goto error_smbios_start;
820 }
821
822 initialized = 1;
823
824 return retval;
825
826error_smbios_start:
827 iounmap(smbios_start);
828error_rom_start:
829 iounmap(cpqhp_rom_start);
830error:
831 return retval;
832}
833
796static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 834static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
797{ 835{
798 u8 num_of_slots = 0; 836 u8 num_of_slots = 0;
@@ -815,7 +853,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
815 return err; 853 return err;
816 } 854 }
817 855
818 // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery 856 /* Need to read VID early b/c it's used to differentiate CPQ and INTC
857 * discovery
858 */
819 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id); 859 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
820 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) { 860 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
821 err(msg_HPC_non_compaq_or_intel); 861 err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
832 } 872 }
833 873
834 /* Check for the proper subsytem ID's 874 /* Check for the proper subsytem ID's
835 * Intel uses a different SSID programming model than Compaq. 875 * Intel uses a different SSID programming model than Compaq.
836 * For Intel, each SSID bit identifies a PHP capability. 876 * For Intel, each SSID bit identifies a PHP capability.
837 * Also Intel HPC's may have RID=0. 877 * Also Intel HPC's may have RID=0.
838 */ 878 */
839 if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) { 879 if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
840 // TODO: This code can be made to support non-Compaq or Intel subsystem IDs 880 err(msg_HPC_not_supported);
841 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid); 881 return -ENODEV;
842 if (rc) { 882 }
843 err("%s : pci_read_config_word failed\n", __func__);
844 goto err_disable_device;
845 }
846 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
847 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
848 err(msg_HPC_non_compaq_or_intel);
849 rc = -ENODEV;
850 goto err_disable_device;
851 }
852 883
853 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL); 884 /* TODO: This code can be made to support non-Compaq or Intel
854 if (!ctrl) { 885 * subsystem IDs
855 err("%s : out of memory\n", __func__); 886 */
856 rc = -ENOMEM; 887 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
857 goto err_disable_device; 888 if (rc) {
858 } 889 err("%s : pci_read_config_word failed\n", __func__);
890 goto err_disable_device;
891 }
892 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
893 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
894 err(msg_HPC_non_compaq_or_intel);
895 rc = -ENODEV;
896 goto err_disable_device;
897 }
859 898
860 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid); 899 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
861 if (rc) { 900 if (!ctrl) {
862 err("%s : pci_read_config_word failed\n", __func__); 901 err("%s : out of memory\n", __func__);
863 goto err_free_ctrl; 902 rc = -ENOMEM;
864 } 903 goto err_disable_device;
904 }
865 905
866 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid); 906 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
867 907 if (rc) {
868 /* Set Vendor ID, so it can be accessed later from other functions */ 908 err("%s : pci_read_config_word failed\n", __func__);
869 ctrl->vendor_id = vendor_id; 909 goto err_free_ctrl;
870 910 }
871 switch (subsystem_vid) {
872 case PCI_VENDOR_ID_COMPAQ:
873 if (pdev->revision >= 0x13) { /* CIOBX */
874 ctrl->push_flag = 1;
875 ctrl->slot_switch_type = 1;
876 ctrl->push_button = 1;
877 ctrl->pci_config_space = 1;
878 ctrl->defeature_PHP = 1;
879 ctrl->pcix_support = 1;
880 ctrl->pcix_speed_capability = 1;
881 pci_read_config_byte(pdev, 0x41, &bus_cap);
882 if (bus_cap & 0x80) {
883 dbg("bus max supports 133MHz PCI-X\n");
884 ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
885 break;
886 }
887 if (bus_cap & 0x40) {
888 dbg("bus max supports 100MHz PCI-X\n");
889 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
890 break;
891 }
892 if (bus_cap & 20) {
893 dbg("bus max supports 66MHz PCI-X\n");
894 ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
895 break;
896 }
897 if (bus_cap & 10) {
898 dbg("bus max supports 66MHz PCI\n");
899 ctrl->speed_capability = PCI_SPEED_66MHz;
900 break;
901 }
902
903 break;
904 }
905
906 switch (subsystem_deviceid) {
907 case PCI_SUB_HPC_ID:
908 /* Original 6500/7000 implementation */
909 ctrl->slot_switch_type = 1;
910 ctrl->speed_capability = PCI_SPEED_33MHz;
911 ctrl->push_button = 0;
912 ctrl->pci_config_space = 1;
913 ctrl->defeature_PHP = 1;
914 ctrl->pcix_support = 0;
915 ctrl->pcix_speed_capability = 0;
916 break;
917 case PCI_SUB_HPC_ID2:
918 /* First Pushbutton implementation */
919 ctrl->push_flag = 1;
920 ctrl->slot_switch_type = 1;
921 ctrl->speed_capability = PCI_SPEED_33MHz;
922 ctrl->push_button = 1;
923 ctrl->pci_config_space = 1;
924 ctrl->defeature_PHP = 1;
925 ctrl->pcix_support = 0;
926 ctrl->pcix_speed_capability = 0;
927 break;
928 case PCI_SUB_HPC_ID_INTC:
929 /* Third party (6500/7000) */
930 ctrl->slot_switch_type = 1;
931 ctrl->speed_capability = PCI_SPEED_33MHz;
932 ctrl->push_button = 0;
933 ctrl->pci_config_space = 1;
934 ctrl->defeature_PHP = 1;
935 ctrl->pcix_support = 0;
936 ctrl->pcix_speed_capability = 0;
937 break;
938 case PCI_SUB_HPC_ID3:
939 /* First 66 Mhz implementation */
940 ctrl->push_flag = 1;
941 ctrl->slot_switch_type = 1;
942 ctrl->speed_capability = PCI_SPEED_66MHz;
943 ctrl->push_button = 1;
944 ctrl->pci_config_space = 1;
945 ctrl->defeature_PHP = 1;
946 ctrl->pcix_support = 0;
947 ctrl->pcix_speed_capability = 0;
948 break;
949 case PCI_SUB_HPC_ID4:
950 /* First PCI-X implementation, 100MHz */
951 ctrl->push_flag = 1;
952 ctrl->slot_switch_type = 1;
953 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
954 ctrl->push_button = 1;
955 ctrl->pci_config_space = 1;
956 ctrl->defeature_PHP = 1;
957 ctrl->pcix_support = 1;
958 ctrl->pcix_speed_capability = 0;
959 break;
960 default:
961 err(msg_HPC_not_supported);
962 rc = -ENODEV;
963 goto err_free_ctrl;
964 }
965 break;
966 911
967 case PCI_VENDOR_ID_INTEL: 912 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
968 /* Check for speed capability (0=33, 1=66) */ 913
969 if (subsystem_deviceid & 0x0001) { 914 /* Set Vendor ID, so it can be accessed later from other
970 ctrl->speed_capability = PCI_SPEED_66MHz; 915 * functions
971 } else { 916 */
972 ctrl->speed_capability = PCI_SPEED_33MHz; 917 ctrl->vendor_id = vendor_id;
973 } 918
974 919 switch (subsystem_vid) {
975 /* Check for push button */ 920 case PCI_VENDOR_ID_COMPAQ:
976 if (subsystem_deviceid & 0x0002) { 921 if (pdev->revision >= 0x13) { /* CIOBX */
977 /* no push button */ 922 ctrl->push_flag = 1;
978 ctrl->push_button = 0; 923 ctrl->slot_switch_type = 1;
979 } else { 924 ctrl->push_button = 1;
980 /* push button supported */ 925 ctrl->pci_config_space = 1;
981 ctrl->push_button = 1; 926 ctrl->defeature_PHP = 1;
982 } 927 ctrl->pcix_support = 1;
983 928 ctrl->pcix_speed_capability = 1;
984 /* Check for slot switch type (0=mechanical, 1=not mechanical) */ 929 pci_read_config_byte(pdev, 0x41, &bus_cap);
985 if (subsystem_deviceid & 0x0004) { 930 if (bus_cap & 0x80) {
986 /* no switch */ 931 dbg("bus max supports 133MHz PCI-X\n");
987 ctrl->slot_switch_type = 0; 932 ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
988 } else {
989 /* switch */
990 ctrl->slot_switch_type = 1;
991 }
992
993 /* PHP Status (0=De-feature PHP, 1=Normal operation) */
994 if (subsystem_deviceid & 0x0008) {
995 ctrl->defeature_PHP = 1; // PHP supported
996 } else {
997 ctrl->defeature_PHP = 0; // PHP not supported
998 }
999
1000 /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
1001 if (subsystem_deviceid & 0x0010) {
1002 ctrl->alternate_base_address = 1; // supported
1003 } else {
1004 ctrl->alternate_base_address = 0; // not supported
1005 }
1006
1007 /* PCI Config Space Index (0=not supported, 1=supported) */
1008 if (subsystem_deviceid & 0x0020) {
1009 ctrl->pci_config_space = 1; // supported
1010 } else {
1011 ctrl->pci_config_space = 0; // not supported
1012 }
1013
1014 /* PCI-X support */
1015 if (subsystem_deviceid & 0x0080) {
1016 /* PCI-X capable */
1017 ctrl->pcix_support = 1;
1018 /* Frequency of operation in PCI-X mode */
1019 if (subsystem_deviceid & 0x0040) {
1020 /* 133MHz PCI-X if bit 7 is 1 */
1021 ctrl->pcix_speed_capability = 1;
1022 } else {
1023 /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
1024 /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
1025 ctrl->pcix_speed_capability = 0;
1026 }
1027 } else {
1028 /* Conventional PCI */
1029 ctrl->pcix_support = 0;
1030 ctrl->pcix_speed_capability = 0;
1031 }
1032 break; 933 break;
934 }
935 if (bus_cap & 0x40) {
936 dbg("bus max supports 100MHz PCI-X\n");
937 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
938 break;
939 }
940 if (bus_cap & 20) {
941 dbg("bus max supports 66MHz PCI-X\n");
942 ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
943 break;
944 }
945 if (bus_cap & 10) {
946 dbg("bus max supports 66MHz PCI\n");
947 ctrl->speed_capability = PCI_SPEED_66MHz;
948 break;
949 }
950
951 break;
952 }
1033 953
1034 default: 954 switch (subsystem_deviceid) {
1035 err(msg_HPC_not_supported); 955 case PCI_SUB_HPC_ID:
1036 rc = -ENODEV; 956 /* Original 6500/7000 implementation */
1037 goto err_free_ctrl; 957 ctrl->slot_switch_type = 1;
958 ctrl->speed_capability = PCI_SPEED_33MHz;
959 ctrl->push_button = 0;
960 ctrl->pci_config_space = 1;
961 ctrl->defeature_PHP = 1;
962 ctrl->pcix_support = 0;
963 ctrl->pcix_speed_capability = 0;
964 break;
965 case PCI_SUB_HPC_ID2:
966 /* First Pushbutton implementation */
967 ctrl->push_flag = 1;
968 ctrl->slot_switch_type = 1;
969 ctrl->speed_capability = PCI_SPEED_33MHz;
970 ctrl->push_button = 1;
971 ctrl->pci_config_space = 1;
972 ctrl->defeature_PHP = 1;
973 ctrl->pcix_support = 0;
974 ctrl->pcix_speed_capability = 0;
975 break;
976 case PCI_SUB_HPC_ID_INTC:
977 /* Third party (6500/7000) */
978 ctrl->slot_switch_type = 1;
979 ctrl->speed_capability = PCI_SPEED_33MHz;
980 ctrl->push_button = 0;
981 ctrl->pci_config_space = 1;
982 ctrl->defeature_PHP = 1;
983 ctrl->pcix_support = 0;
984 ctrl->pcix_speed_capability = 0;
985 break;
986 case PCI_SUB_HPC_ID3:
987 /* First 66 Mhz implementation */
988 ctrl->push_flag = 1;
989 ctrl->slot_switch_type = 1;
990 ctrl->speed_capability = PCI_SPEED_66MHz;
991 ctrl->push_button = 1;
992 ctrl->pci_config_space = 1;
993 ctrl->defeature_PHP = 1;
994 ctrl->pcix_support = 0;
995 ctrl->pcix_speed_capability = 0;
996 break;
997 case PCI_SUB_HPC_ID4:
998 /* First PCI-X implementation, 100MHz */
999 ctrl->push_flag = 1;
1000 ctrl->slot_switch_type = 1;
1001 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
1002 ctrl->push_button = 1;
1003 ctrl->pci_config_space = 1;
1004 ctrl->defeature_PHP = 1;
1005 ctrl->pcix_support = 1;
1006 ctrl->pcix_speed_capability = 0;
1007 break;
1008 default:
1009 err(msg_HPC_not_supported);
1010 rc = -ENODEV;
1011 goto err_free_ctrl;
1038 } 1012 }
1013 break;
1014
1015 case PCI_VENDOR_ID_INTEL:
1016 /* Check for speed capability (0=33, 1=66) */
1017 if (subsystem_deviceid & 0x0001)
1018 ctrl->speed_capability = PCI_SPEED_66MHz;
1019 else
1020 ctrl->speed_capability = PCI_SPEED_33MHz;
1021
1022 /* Check for push button */
1023 if (subsystem_deviceid & 0x0002)
1024 ctrl->push_button = 0;
1025 else
1026 ctrl->push_button = 1;
1027
1028 /* Check for slot switch type (0=mechanical, 1=not mechanical) */
1029 if (subsystem_deviceid & 0x0004)
1030 ctrl->slot_switch_type = 0;
1031 else
1032 ctrl->slot_switch_type = 1;
1033
1034 /* PHP Status (0=De-feature PHP, 1=Normal operation) */
1035 if (subsystem_deviceid & 0x0008)
1036 ctrl->defeature_PHP = 1; /* PHP supported */
1037 else
1038 ctrl->defeature_PHP = 0; /* PHP not supported */
1039
1040 /* Alternate Base Address Register Interface
1041 * (0=not supported, 1=supported)
1042 */
1043 if (subsystem_deviceid & 0x0010)
1044 ctrl->alternate_base_address = 1;
1045 else
1046 ctrl->alternate_base_address = 0;
1047
1048 /* PCI Config Space Index (0=not supported, 1=supported) */
1049 if (subsystem_deviceid & 0x0020)
1050 ctrl->pci_config_space = 1;
1051 else
1052 ctrl->pci_config_space = 0;
1053
1054 /* PCI-X support */
1055 if (subsystem_deviceid & 0x0080) {
1056 ctrl->pcix_support = 1;
1057 if (subsystem_deviceid & 0x0040)
1058 /* 133MHz PCI-X if bit 7 is 1 */
1059 ctrl->pcix_speed_capability = 1;
1060 else
1061 /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
1062 /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
1063 ctrl->pcix_speed_capability = 0;
1064 } else {
1065 /* Conventional PCI */
1066 ctrl->pcix_support = 0;
1067 ctrl->pcix_speed_capability = 0;
1068 }
1069 break;
1039 1070
1040 } else { 1071 default:
1041 err(msg_HPC_not_supported); 1072 err(msg_HPC_not_supported);
1042 return -ENODEV; 1073 rc = -ENODEV;
1074 goto err_free_ctrl;
1043 } 1075 }
1044 1076
1045 // Tell the user that we found one. 1077 /* Tell the user that we found one. */
1046 info("Initializing the PCI hot plug controller residing on PCI bus %d\n", 1078 info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
1047 pdev->bus->number); 1079 pdev->bus->number);
1048 1080
@@ -1087,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1087 if (rc) { 1119 if (rc) {
1088 goto err_free_bus; 1120 goto err_free_bus;
1089 } 1121 }
1090 1122
1091 dbg("pdev = %p\n", pdev); 1123 dbg("pdev = %p\n", pdev);
1092 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0)); 1124 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
1093 dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0)); 1125 dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1109 goto err_free_mem_region; 1141 goto err_free_mem_region;
1110 } 1142 }
1111 1143
1112 // Check for 66Mhz operation 1144 /* Check for 66Mhz operation */
1113 ctrl->speed = get_controller_speed(ctrl); 1145 ctrl->speed = get_controller_speed(ctrl);
1114 1146
1115 1147
@@ -1120,7 +1152,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1120 * 1152 *
1121 ********************************************************/ 1153 ********************************************************/
1122 1154
1123 // find the physical slot number of the first hot plug slot 1155 /* find the physical slot number of the first hot plug slot */
1124 1156
1125 /* Get slot won't work for devices behind bridges, but 1157 /* Get slot won't work for devices behind bridges, but
1126 * in this case it will always be called for the "base" 1158 * in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1137 goto err_iounmap; 1169 goto err_iounmap;
1138 } 1170 }
1139 1171
1140 // Store PCI Config Space for all devices on this bus 1172 /* Store PCI Config Space for all devices on this bus */
1141 rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK)); 1173 rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
1142 if (rc) { 1174 if (rc) {
1143 err("%s: unable to save PCI configuration data, error %d\n", 1175 err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1148 /* 1180 /*
1149 * Get IO, memory, and IRQ resources for new devices 1181 * Get IO, memory, and IRQ resources for new devices
1150 */ 1182 */
1151 // The next line is required for cpqhp_find_available_resources 1183 /* The next line is required for cpqhp_find_available_resources */
1152 ctrl->interrupt = pdev->irq; 1184 ctrl->interrupt = pdev->irq;
1153 if (ctrl->interrupt < 0x10) { 1185 if (ctrl->interrupt < 0x10) {
1154 cpqhp_legacy_mode = 1; 1186 cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1182 __func__, rc); 1214 __func__, rc);
1183 goto err_iounmap; 1215 goto err_iounmap;
1184 } 1216 }
1185 1217
1186 /* Mask all general input interrupts */ 1218 /* Mask all general input interrupts */
1187 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK); 1219 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
1188 1220
@@ -1196,12 +1228,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1196 goto err_iounmap; 1228 goto err_iounmap;
1197 } 1229 }
1198 1230
1199 /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */ 1231 /* Enable Shift Out interrupt and clear it, also enable SERR on power
1232 * fault
1233 */
1200 temp_word = readw(ctrl->hpc_reg + MISC); 1234 temp_word = readw(ctrl->hpc_reg + MISC);
1201 temp_word |= 0x4006; 1235 temp_word |= 0x4006;
1202 writew(temp_word, ctrl->hpc_reg + MISC); 1236 writew(temp_word, ctrl->hpc_reg + MISC);
1203 1237
1204 // Changed 05/05/97 to clear all interrupts at start 1238 /* Changed 05/05/97 to clear all interrupts at start */
1205 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR); 1239 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
1206 1240
1207 ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 1241 ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1216 cpqhp_ctrl_list = ctrl; 1250 cpqhp_ctrl_list = ctrl;
1217 } 1251 }
1218 1252
1219 // turn off empty slots here unless command line option "ON" set 1253 /* turn off empty slots here unless command line option "ON" set
1220 // Wait for exclusive access to hardware 1254 * Wait for exclusive access to hardware
1255 */
1221 mutex_lock(&ctrl->crit_sect); 1256 mutex_lock(&ctrl->crit_sect);
1222 1257
1223 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; 1258 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
1224 1259
1225 // find first device number for the ctrl 1260 /* find first device number for the ctrl */
1226 device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4; 1261 device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
1227 1262
1228 while (num_of_slots) { 1263 while (num_of_slots) {
@@ -1234,23 +1269,21 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1234 hp_slot = func->device - ctrl->slot_device_offset; 1269 hp_slot = func->device - ctrl->slot_device_offset;
1235 dbg("hp_slot: %d\n", hp_slot); 1270 dbg("hp_slot: %d\n", hp_slot);
1236 1271
1237 // We have to save the presence info for these slots 1272 /* We have to save the presence info for these slots */
1238 temp_word = ctrl->ctrl_int_comp >> 16; 1273 temp_word = ctrl->ctrl_int_comp >> 16;
1239 func->presence_save = (temp_word >> hp_slot) & 0x01; 1274 func->presence_save = (temp_word >> hp_slot) & 0x01;
1240 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 1275 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
1241 1276
1242 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { 1277 if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
1243 func->switch_save = 0; 1278 func->switch_save = 0;
1244 } else { 1279 else
1245 func->switch_save = 0x10; 1280 func->switch_save = 0x10;
1246 }
1247 1281
1248 if (!power_mode) { 1282 if (!power_mode)
1249 if (!func->is_a_board) { 1283 if (!func->is_a_board) {
1250 green_LED_off(ctrl, hp_slot); 1284 green_LED_off(ctrl, hp_slot);
1251 slot_disable(ctrl, hp_slot); 1285 slot_disable(ctrl, hp_slot);
1252 } 1286 }
1253 }
1254 1287
1255 device++; 1288 device++;
1256 num_of_slots--; 1289 num_of_slots--;
@@ -1258,7 +1291,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1258 1291
1259 if (!power_mode) { 1292 if (!power_mode) {
1260 set_SOGO(ctrl); 1293 set_SOGO(ctrl);
1261 // Wait for SOBS to be unset 1294 /* Wait for SOBS to be unset */
1262 wait_for_ctrl_irq(ctrl); 1295 wait_for_ctrl_irq(ctrl);
1263 } 1296 }
1264 1297
@@ -1269,7 +1302,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1269 goto err_free_irq; 1302 goto err_free_irq;
1270 } 1303 }
1271 1304
1272 // Done with exclusive hardware access 1305 /* Done with exclusive hardware access */
1273 mutex_unlock(&ctrl->crit_sect); 1306 mutex_unlock(&ctrl->crit_sect);
1274 1307
1275 cpqhp_create_debugfs_files(ctrl); 1308 cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@ err_disable_device:
1291 return rc; 1324 return rc;
1292} 1325}
1293 1326
1294
1295static int one_time_init(void)
1296{
1297 int loop;
1298 int retval = 0;
1299
1300 if (initialized)
1301 return 0;
1302
1303 power_mode = 0;
1304
1305 retval = pci_print_IRQ_route();
1306 if (retval)
1307 goto error;
1308
1309 dbg("Initialize + Start the notification mechanism \n");
1310
1311 retval = cpqhp_event_start_thread();
1312 if (retval)
1313 goto error;
1314
1315 dbg("Initialize slot lists\n");
1316 for (loop = 0; loop < 256; loop++) {
1317 cpqhp_slot_list[loop] = NULL;
1318 }
1319
1320 // FIXME: We also need to hook the NMI handler eventually.
1321 // this also needs to be worked with Christoph
1322 // register_NMI_handler();
1323
1324 // Map rom address
1325 cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
1326 if (!cpqhp_rom_start) {
1327 err ("Could not ioremap memory region for ROM\n");
1328 retval = -EIO;
1329 goto error;
1330 }
1331
1332 /* Now, map the int15 entry point if we are on compaq specific hardware */
1333 compaq_nvram_init(cpqhp_rom_start);
1334
1335 /* Map smbios table entry point structure */
1336 smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
1337 cpqhp_rom_start + ROM_PHY_LEN);
1338 if (!smbios_table) {
1339 err ("Could not find the SMBIOS pointer in memory\n");
1340 retval = -EIO;
1341 goto error_rom_start;
1342 }
1343
1344 smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
1345 readw(smbios_table + ST_LENGTH));
1346 if (!smbios_start) {
1347 err ("Could not ioremap memory region taken from SMBIOS values\n");
1348 retval = -EIO;
1349 goto error_smbios_start;
1350 }
1351
1352 initialized = 1;
1353
1354 return retval;
1355
1356error_smbios_start:
1357 iounmap(smbios_start);
1358error_rom_start:
1359 iounmap(cpqhp_rom_start);
1360error:
1361 return retval;
1362}
1363
1364
1365static void __exit unload_cpqphpd(void) 1327static void __exit unload_cpqphpd(void)
1366{ 1328{
1367 struct pci_func *next; 1329 struct pci_func *next;
@@ -1381,10 +1343,10 @@ static void __exit unload_cpqphpd(void)
1381 if (ctrl->hpc_reg) { 1343 if (ctrl->hpc_reg) {
1382 u16 misc; 1344 u16 misc;
1383 rc = read_slot_enable (ctrl); 1345 rc = read_slot_enable (ctrl);
1384 1346
1385 writeb(0, ctrl->hpc_reg + SLOT_SERR); 1347 writeb(0, ctrl->hpc_reg + SLOT_SERR);
1386 writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK); 1348 writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
1387 1349
1388 misc = readw(ctrl->hpc_reg + MISC); 1350 misc = readw(ctrl->hpc_reg + MISC);
1389 misc &= 0xFFFD; 1351 misc &= 0xFFFD;
1390 writew(misc, ctrl->hpc_reg + MISC); 1352 writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@ static void __exit unload_cpqphpd(void)
1464 } 1426 }
1465 } 1427 }
1466 1428
1467 // Stop the notification mechanism 1429 /* Stop the notification mechanism */
1468 if (initialized) 1430 if (initialized)
1469 cpqhp_event_stop_thread(); 1431 cpqhp_event_stop_thread();
1470 1432
1471 //unmap the rom address 1433 /* unmap the rom address */
1472 if (cpqhp_rom_start) 1434 if (cpqhp_rom_start)
1473 iounmap(cpqhp_rom_start); 1435 iounmap(cpqhp_rom_start);
1474 if (smbios_start) 1436 if (smbios_start)
1475 iounmap(smbios_start); 1437 iounmap(smbios_start);
1476} 1438}
1477 1439
1478
1479
1480static struct pci_device_id hpcd_pci_tbl[] = { 1440static struct pci_device_id hpcd_pci_tbl[] = {
1481 { 1441 {
1482 /* handle any PCI Hotplug controller */ 1442 /* handle any PCI Hotplug controller */
1483 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), 1443 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
1484 .class_mask = ~0, 1444 .class_mask = ~0,
1485 1445
1486 /* no matter who makes it */ 1446 /* no matter who makes it */
1487 .vendor = PCI_ANY_ID, 1447 .vendor = PCI_ANY_ID,
1488 .device = PCI_ANY_ID, 1448 .device = PCI_ANY_ID,
1489 .subvendor = PCI_ANY_ID, 1449 .subvendor = PCI_ANY_ID,
1490 .subdevice = PCI_ANY_ID, 1450 .subdevice = PCI_ANY_ID,
1491 1451
1492 }, { /* end: all zeroes */ } 1452 }, { /* end: all zeroes */ }
1493}; 1453};
1494 1454
1495MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl); 1455MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
1496 1456
1497
1498
1499static struct pci_driver cpqhpc_driver = { 1457static struct pci_driver cpqhpc_driver = {
1500 .name = "compaq_pci_hotplug", 1458 .name = "compaq_pci_hotplug",
1501 .id_table = hpcd_pci_tbl, 1459 .id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@ static struct pci_driver cpqhpc_driver = {
1503 /* remove: cpqhpc_remove_one, */ 1461 /* remove: cpqhpc_remove_one, */
1504}; 1462};
1505 1463
1506
1507
1508static int __init cpqhpc_init(void) 1464static int __init cpqhpc_init(void)
1509{ 1465{
1510 int result; 1466 int result;
@@ -1518,7 +1474,6 @@ static int __init cpqhpc_init(void)
1518 return result; 1474 return result;
1519} 1475}
1520 1476
1521
1522static void __exit cpqhpc_cleanup(void) 1477static void __exit cpqhpc_cleanup(void)
1523{ 1478{
1524 dbg("unload_cpqphpd()\n"); 1479 dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@ static void __exit cpqhpc_cleanup(void)
1529 cpqhp_shutdown_debugfs(); 1484 cpqhp_shutdown_debugfs();
1530} 1485}
1531 1486
1532
1533module_init(cpqhpc_init); 1487module_init(cpqhpc_init);
1534module_exit(cpqhpc_cleanup); 1488module_exit(cpqhpc_cleanup);
1535
1536
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8c4b11..2fa47af992a8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
81 81
82 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 82 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
83 if (change & (0x1L << hp_slot)) { 83 if (change & (0x1L << hp_slot)) {
84 /********************************** 84 /*
85 * this one changed. 85 * this one changed.
86 **********************************/ 86 */
87 func = cpqhp_slot_find(ctrl->bus, 87 func = cpqhp_slot_find(ctrl->bus,
88 (hp_slot + ctrl->slot_device_offset), 0); 88 (hp_slot + ctrl->slot_device_offset), 0);
89 89
90 /* this is the structure that tells the worker thread 90 /* this is the structure that tells the worker thread
91 *what to do */ 91 * what to do
92 */
92 taskInfo = &(ctrl->event_queue[ctrl->next_event]); 93 taskInfo = &(ctrl->event_queue[ctrl->next_event]);
93 ctrl->next_event = (ctrl->next_event + 1) % 10; 94 ctrl->next_event = (ctrl->next_event + 1) % 10;
94 taskInfo->hp_slot = hp_slot; 95 taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
100 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 101 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
101 102
102 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { 103 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
103 /********************************** 104 /*
104 * Switch opened 105 * Switch opened
105 **********************************/ 106 */
106 107
107 func->switch_save = 0; 108 func->switch_save = 0;
108 109
109 taskInfo->event_type = INT_SWITCH_OPEN; 110 taskInfo->event_type = INT_SWITCH_OPEN;
110 } else { 111 } else {
111 /********************************** 112 /*
112 * Switch closed 113 * Switch closed
113 **********************************/ 114 */
114 115
115 func->switch_save = 0x10; 116 func->switch_save = 0x10;
116 117
@@ -131,9 +132,8 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
131{ 132{
132 struct slot *slot = ctrl->slot; 133 struct slot *slot = ctrl->slot;
133 134
134 while (slot && (slot->device != device)) { 135 while (slot && (slot->device != device))
135 slot = slot->next; 136 slot = slot->next;
136 }
137 137
138 return slot; 138 return slot;
139} 139}
@@ -152,17 +152,17 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
152 if (!change) 152 if (!change)
153 return 0; 153 return 0;
154 154
155 /********************************** 155 /*
156 * Presence Change 156 * Presence Change
157 **********************************/ 157 */
158 dbg("cpqsbd: Presence/Notify input change.\n"); 158 dbg("cpqsbd: Presence/Notify input change.\n");
159 dbg(" Changed bits are 0x%4.4x\n", change ); 159 dbg(" Changed bits are 0x%4.4x\n", change );
160 160
161 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 161 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
162 if (change & (0x0101 << hp_slot)) { 162 if (change & (0x0101 << hp_slot)) {
163 /********************************** 163 /*
164 * this one changed. 164 * this one changed.
165 **********************************/ 165 */
166 func = cpqhp_slot_find(ctrl->bus, 166 func = cpqhp_slot_find(ctrl->bus,
167 (hp_slot + ctrl->slot_device_offset), 0); 167 (hp_slot + ctrl->slot_device_offset), 0);
168 168
@@ -177,22 +177,23 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
177 return 0; 177 return 0;
178 178
179 /* If the switch closed, must be a button 179 /* If the switch closed, must be a button
180 * If not in button mode, nevermind */ 180 * If not in button mode, nevermind
181 */
181 if (func->switch_save && (ctrl->push_button == 1)) { 182 if (func->switch_save && (ctrl->push_button == 1)) {
182 temp_word = ctrl->ctrl_int_comp >> 16; 183 temp_word = ctrl->ctrl_int_comp >> 16;
183 temp_byte = (temp_word >> hp_slot) & 0x01; 184 temp_byte = (temp_word >> hp_slot) & 0x01;
184 temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; 185 temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
185 186
186 if (temp_byte != func->presence_save) { 187 if (temp_byte != func->presence_save) {
187 /************************************** 188 /*
188 * button Pressed (doesn't do anything) 189 * button Pressed (doesn't do anything)
189 **************************************/ 190 */
190 dbg("hp_slot %d button pressed\n", hp_slot); 191 dbg("hp_slot %d button pressed\n", hp_slot);
191 taskInfo->event_type = INT_BUTTON_PRESS; 192 taskInfo->event_type = INT_BUTTON_PRESS;
192 } else { 193 } else {
193 /********************************** 194 /*
194 * button Released - TAKE ACTION!!!! 195 * button Released - TAKE ACTION!!!!
195 **********************************/ 196 */
196 dbg("hp_slot %d button released\n", hp_slot); 197 dbg("hp_slot %d button released\n", hp_slot);
197 taskInfo->event_type = INT_BUTTON_RELEASE; 198 taskInfo->event_type = INT_BUTTON_RELEASE;
198 199
@@ -210,7 +211,8 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
210 } 211 }
211 } else { 212 } else {
212 /* Switch is open, assume a presence change 213 /* Switch is open, assume a presence change
213 * Save the presence state */ 214 * Save the presence state
215 */
214 temp_word = ctrl->ctrl_int_comp >> 16; 216 temp_word = ctrl->ctrl_int_comp >> 16;
215 func->presence_save = (temp_word >> hp_slot) & 0x01; 217 func->presence_save = (temp_word >> hp_slot) & 0x01;
216 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 218 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
241 if (!change) 243 if (!change)
242 return 0; 244 return 0;
243 245
244 /********************************** 246 /*
245 * power fault 247 * power fault
246 **********************************/ 248 */
247 249
248 info("power fault interrupt\n"); 250 info("power fault interrupt\n");
249 251
250 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 252 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
251 if (change & (0x01 << hp_slot)) { 253 if (change & (0x01 << hp_slot)) {
252 /********************************** 254 /*
253 * this one changed. 255 * this one changed.
254 **********************************/ 256 */
255 func = cpqhp_slot_find(ctrl->bus, 257 func = cpqhp_slot_find(ctrl->bus,
256 (hp_slot + ctrl->slot_device_offset), 0); 258 (hp_slot + ctrl->slot_device_offset), 0);
257 259
@@ -262,16 +264,16 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
262 rc++; 264 rc++;
263 265
264 if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { 266 if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
265 /********************************** 267 /*
266 * power fault Cleared 268 * power fault Cleared
267 **********************************/ 269 */
268 func->status = 0x00; 270 func->status = 0x00;
269 271
270 taskInfo->event_type = INT_POWER_FAULT_CLEAR; 272 taskInfo->event_type = INT_POWER_FAULT_CLEAR;
271 } else { 273 } else {
272 /********************************** 274 /*
273 * power fault 275 * power fault
274 **********************************/ 276 */
275 taskInfo->event_type = INT_POWER_FAULT; 277 taskInfo->event_type = INT_POWER_FAULT;
276 278
277 if (ctrl->rev < 4) { 279 if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **h
432 434
433 435
434 /* If we got here, there the bridge requires some of the resource, but 436 /* If we got here, there the bridge requires some of the resource, but
435 * we may be able to split some off of the front */ 437 * we may be able to split some off of the front
438 */
436 439
437 node = *head; 440 node = *head;
438 441
439 if (node->length & (alignment -1)) { 442 if (node->length & (alignment -1)) {
440 /* this one isn't an aligned length, so we'll make a new entry 443 /* this one isn't an aligned length, so we'll make a new entry
441 * and split it up. */ 444 * and split it up.
445 */
442 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 446 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
443 447
444 if (!split_node) 448 if (!split_node)
@@ -544,10 +548,10 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
544 if (!(*head)) 548 if (!(*head))
545 return NULL; 549 return NULL;
546 550
547 if ( cpqhp_resource_sort_and_combine(head) ) 551 if (cpqhp_resource_sort_and_combine(head))
548 return NULL; 552 return NULL;
549 553
550 if ( sort_by_size(head) ) 554 if (sort_by_size(head))
551 return NULL; 555 return NULL;
552 556
553 for (node = *head; node; node = node->next) { 557 for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
556 560
557 if (node->base & (size - 1)) { 561 if (node->base & (size - 1)) {
558 /* this one isn't base aligned properly 562 /* this one isn't base aligned properly
559 * so we'll make a new entry and split it up */ 563 * so we'll make a new entry and split it up
564 */
560 temp_dword = (node->base | (size-1)) + 1; 565 temp_dword = (node->base | (size-1)) + 1;
561 566
562 /* Short circuit if adjusted size is too small */ 567 /* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
581 /* Don't need to check if too small since we already did */ 586 /* Don't need to check if too small since we already did */
582 if (node->length > size) { 587 if (node->length > size) {
583 /* this one is longer than we need 588 /* this one is longer than we need
584 * so we'll make a new entry and split it up */ 589 * so we'll make a new entry and split it up
590 */
585 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 591 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
586 592
587 if (!split_node) 593 if (!split_node)
@@ -601,7 +607,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
601 continue; 607 continue;
602 608
603 /* If we got here, then it is the right size 609 /* If we got here, then it is the right size
604 * Now take it out of the list and break */ 610 * Now take it out of the list and break
611 */
605 if (*head == node) { 612 if (*head == node) {
606 *head = node->next; 613 *head = node->next;
607 } else { 614 } else {
@@ -642,14 +649,16 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
642 return NULL; 649 return NULL;
643 650
644 for (max = *head; max; max = max->next) { 651 for (max = *head; max; max = max->next) {
645 /* If not big enough we could probably just bail, 652 /* If not big enough we could probably just bail,
646 * instead we'll continue to the next. */ 653 * instead we'll continue to the next.
654 */
647 if (max->length < size) 655 if (max->length < size)
648 continue; 656 continue;
649 657
650 if (max->base & (size - 1)) { 658 if (max->base & (size - 1)) {
651 /* this one isn't base aligned properly 659 /* this one isn't base aligned properly
652 * so we'll make a new entry and split it up */ 660 * so we'll make a new entry and split it up
661 */
653 temp_dword = (max->base | (size-1)) + 1; 662 temp_dword = (max->base | (size-1)) + 1;
654 663
655 /* Short circuit if adjusted size is too small */ 664 /* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
672 681
673 if ((max->base + max->length) & (size - 1)) { 682 if ((max->base + max->length) & (size - 1)) {
674 /* this one isn't end aligned properly at the top 683 /* this one isn't end aligned properly at the top
675 * so we'll make a new entry and split it up */ 684 * so we'll make a new entry and split it up
685 */
676 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 686 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
677 687
678 if (!split_node) 688 if (!split_node)
@@ -744,7 +754,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
744 if (node->base & (size - 1)) { 754 if (node->base & (size - 1)) {
745 dbg("%s: not aligned\n", __func__); 755 dbg("%s: not aligned\n", __func__);
746 /* this one isn't base aligned properly 756 /* this one isn't base aligned properly
747 * so we'll make a new entry and split it up */ 757 * so we'll make a new entry and split it up
758 */
748 temp_dword = (node->base | (size-1)) + 1; 759 temp_dword = (node->base | (size-1)) + 1;
749 760
750 /* Short circuit if adjusted size is too small */ 761 /* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
769 if (node->length > size) { 780 if (node->length > size) {
770 dbg("%s: too big\n", __func__); 781 dbg("%s: too big\n", __func__);
771 /* this one is longer than we need 782 /* this one is longer than we need
772 * so we'll make a new entry and split it up */ 783 * so we'll make a new entry and split it up
784 */
773 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 785 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
774 786
775 if (!split_node) 787 if (!split_node)
@@ -886,19 +898,19 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
886 u32 Diff; 898 u32 Diff;
887 u32 temp_dword; 899 u32 temp_dword;
888 900
889 901
890 misc = readw(ctrl->hpc_reg + MISC); 902 misc = readw(ctrl->hpc_reg + MISC);
891 /*************************************** 903 /*
892 * Check to see if it was our interrupt 904 * Check to see if it was our interrupt
893 ***************************************/ 905 */
894 if (!(misc & 0x000C)) { 906 if (!(misc & 0x000C)) {
895 return IRQ_NONE; 907 return IRQ_NONE;
896 } 908 }
897 909
898 if (misc & 0x0004) { 910 if (misc & 0x0004) {
899 /********************************** 911 /*
900 * Serial Output interrupt Pending 912 * Serial Output interrupt Pending
901 **********************************/ 913 */
902 914
903 /* Clear the interrupt */ 915 /* Clear the interrupt */
904 misc |= 0x0004; 916 misc |= 0x0004;
@@ -961,11 +973,8 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
961 struct pci_func *next; 973 struct pci_func *next;
962 974
963 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); 975 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
964 if (new_slot == NULL) { 976 if (new_slot == NULL)
965 /* I'm not dead yet!
966 * You will be. */
967 return new_slot; 977 return new_slot;
968 }
969 978
970 new_slot->next = NULL; 979 new_slot->next = NULL;
971 new_slot->configured = 1; 980 new_slot->configured = 1;
@@ -996,10 +1005,8 @@ static int slot_remove(struct pci_func * old_slot)
996 return 1; 1005 return 1;
997 1006
998 next = cpqhp_slot_list[old_slot->bus]; 1007 next = cpqhp_slot_list[old_slot->bus];
999 1008 if (next == NULL)
1000 if (next == NULL) {
1001 return 1; 1009 return 1;
1002 }
1003 1010
1004 if (next == old_slot) { 1011 if (next == old_slot) {
1005 cpqhp_slot_list[old_slot->bus] = old_slot->next; 1012 cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@ static int slot_remove(struct pci_func * old_slot)
1008 return 0; 1015 return 0;
1009 } 1016 }
1010 1017
1011 while ((next->next != old_slot) && (next->next != NULL)) { 1018 while ((next->next != old_slot) && (next->next != NULL))
1012 next = next->next; 1019 next = next->next;
1013 }
1014 1020
1015 if (next->next == old_slot) { 1021 if (next->next == old_slot) {
1016 next->next = old_slot->next; 1022 next->next = old_slot->next;
@@ -1040,9 +1046,8 @@ static int bridge_slot_remove(struct pci_func *bridge)
1040 for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { 1046 for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
1041 next = cpqhp_slot_list[tempBus]; 1047 next = cpqhp_slot_list[tempBus];
1042 1048
1043 while (!slot_remove(next)) { 1049 while (!slot_remove(next))
1044 next = cpqhp_slot_list[tempBus]; 1050 next = cpqhp_slot_list[tempBus];
1045 }
1046 } 1051 }
1047 1052
1048 next = cpqhp_slot_list[bridge->bus]; 1053 next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1130 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); 1135 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
1131 u16 reg16; 1136 u16 reg16;
1132 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); 1137 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
1133 1138
1134 if (ctrl->speed == adapter_speed) 1139 if (ctrl->speed == adapter_speed)
1135 return 0; 1140 return 0;
1136 1141
1137 /* We don't allow freq/mode changes if we find another adapter running 1142 /* We don't allow freq/mode changes if we find another adapter running
1138 * in another slot on this controller */ 1143 * in another slot on this controller
1144 */
1139 for(slot = ctrl->slot; slot; slot = slot->next) { 1145 for(slot = ctrl->slot; slot; slot = slot->next) {
1140 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1146 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1141 continue; 1147 continue;
1142 if (!slot->hotplug_slot || !slot->hotplug_slot->info) 1148 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
1143 continue; 1149 continue;
1144 if (slot->hotplug_slot->info->adapter_status == 0) 1150 if (slot->hotplug_slot->info->adapter_status == 0)
1145 continue; 1151 continue;
1146 /* If another adapter is running on the same segment but at a 1152 /* If another adapter is running on the same segment but at a
1147 * lower speed/mode, we allow the new adapter to function at 1153 * lower speed/mode, we allow the new adapter to function at
1148 * this rate if supported */ 1154 * this rate if supported
1149 if (ctrl->speed < adapter_speed) 1155 */
1156 if (ctrl->speed < adapter_speed)
1150 return 0; 1157 return 0;
1151 1158
1152 return 1; 1159 return 1;
1153 } 1160 }
1154 1161
1155 /* If the controller doesn't support freq/mode changes and the 1162 /* If the controller doesn't support freq/mode changes and the
1156 * controller is running at a higher mode, we bail */ 1163 * controller is running at a higher mode, we bail
1164 */
1157 if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) 1165 if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
1158 return 1; 1166 return 1;
1159 1167
1160 /* But we allow the adapter to run at a lower rate if possible */ 1168 /* But we allow the adapter to run at a lower rate if possible */
1161 if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) 1169 if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
1162 return 0; 1170 return 0;
1163 1171
1164 /* We try to set the max speed supported by both the adapter and 1172 /* We try to set the max speed supported by both the adapter and
1165 * controller */ 1173 * controller
1174 */
1166 if (ctrl->speed_capability < adapter_speed) { 1175 if (ctrl->speed_capability < adapter_speed) {
1167 if (ctrl->speed == ctrl->speed_capability) 1176 if (ctrl->speed == ctrl->speed_capability)
1168 return 0; 1177 return 0;
@@ -1171,22 +1180,22 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1171 1180
1172 writel(0x0L, ctrl->hpc_reg + LED_CONTROL); 1181 writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
1173 writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); 1182 writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
1174 1183
1175 set_SOGO(ctrl); 1184 set_SOGO(ctrl);
1176 wait_for_ctrl_irq(ctrl); 1185 wait_for_ctrl_irq(ctrl);
1177 1186
1178 if (adapter_speed != PCI_SPEED_133MHz_PCIX) 1187 if (adapter_speed != PCI_SPEED_133MHz_PCIX)
1179 reg = 0xF5; 1188 reg = 0xF5;
1180 else 1189 else
1181 reg = 0xF4; 1190 reg = 0xF4;
1182 pci_write_config_byte(ctrl->pci_dev, 0x41, reg); 1191 pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
1183 1192
1184 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); 1193 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
1185 reg16 &= ~0x000F; 1194 reg16 &= ~0x000F;
1186 switch(adapter_speed) { 1195 switch(adapter_speed) {
1187 case(PCI_SPEED_133MHz_PCIX): 1196 case(PCI_SPEED_133MHz_PCIX):
1188 reg = 0x75; 1197 reg = 0x75;
1189 reg16 |= 0xB; 1198 reg16 |= 0xB;
1190 break; 1199 break;
1191 case(PCI_SPEED_100MHz_PCIX): 1200 case(PCI_SPEED_100MHz_PCIX):
1192 reg = 0x74; 1201 reg = 0x74;
@@ -1203,48 +1212,48 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1203 default: /* 33MHz PCI 2.2 */ 1212 default: /* 33MHz PCI 2.2 */
1204 reg = 0x71; 1213 reg = 0x71;
1205 break; 1214 break;
1206 1215
1207 } 1216 }
1208 reg16 |= 0xB << 12; 1217 reg16 |= 0xB << 12;
1209 writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ); 1218 writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
1210 1219
1211 mdelay(5); 1220 mdelay(5);
1212 1221
1213 /* Reenable interrupts */ 1222 /* Reenable interrupts */
1214 writel(0, ctrl->hpc_reg + INT_MASK); 1223 writel(0, ctrl->hpc_reg + INT_MASK);
1215 1224
1216 pci_write_config_byte(ctrl->pci_dev, 0x41, reg); 1225 pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
1217 1226
1218 /* Restart state machine */ 1227 /* Restart state machine */
1219 reg = ~0xF; 1228 reg = ~0xF;
1220 pci_read_config_byte(ctrl->pci_dev, 0x43, &reg); 1229 pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
1221 pci_write_config_byte(ctrl->pci_dev, 0x43, reg); 1230 pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
1222 1231
1223 /* Only if mode change...*/ 1232 /* Only if mode change...*/
1224 if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || 1233 if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
1225 ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 1234 ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
1226 set_SOGO(ctrl); 1235 set_SOGO(ctrl);
1227 1236
1228 wait_for_ctrl_irq(ctrl); 1237 wait_for_ctrl_irq(ctrl);
1229 mdelay(1100); 1238 mdelay(1100);
1230 1239
1231 /* Restore LED/Slot state */ 1240 /* Restore LED/Slot state */
1232 writel(leds, ctrl->hpc_reg + LED_CONTROL); 1241 writel(leds, ctrl->hpc_reg + LED_CONTROL);
1233 writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE); 1242 writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
1234 1243
1235 set_SOGO(ctrl); 1244 set_SOGO(ctrl);
1236 wait_for_ctrl_irq(ctrl); 1245 wait_for_ctrl_irq(ctrl);
1237 1246
1238 ctrl->speed = adapter_speed; 1247 ctrl->speed = adapter_speed;
1239 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1248 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
1240 1249
1241 info("Successfully changed frequency/mode for adapter in slot %d\n", 1250 info("Successfully changed frequency/mode for adapter in slot %d\n",
1242 slot->number); 1251 slot->number);
1243 return 0; 1252 return 0;
1244} 1253}
1245 1254
1246/* the following routines constitute the bulk of the 1255/* the following routines constitute the bulk of the
1247 hotplug controller logic 1256 * hotplug controller logic
1248 */ 1257 */
1249 1258
1250 1259
@@ -1268,17 +1277,17 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1268 1277
1269 hp_slot = func->device - ctrl->slot_device_offset; 1278 hp_slot = func->device - ctrl->slot_device_offset;
1270 1279
1271 if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) { 1280 /*
1272 /********************************** 1281 * The switch is open.
1273 * The switch is open. 1282 */
1274 **********************************/ 1283 if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
1275 rc = INTERLOCK_OPEN; 1284 rc = INTERLOCK_OPEN;
1276 } else if (is_slot_enabled (ctrl, hp_slot)) { 1285 /*
1277 /********************************** 1286 * The board is already on
1278 * The board is already on 1287 */
1279 **********************************/ 1288 else if (is_slot_enabled (ctrl, hp_slot))
1280 rc = CARD_FUNCTIONING; 1289 rc = CARD_FUNCTIONING;
1281 } else { 1290 else {
1282 mutex_lock(&ctrl->crit_sect); 1291 mutex_lock(&ctrl->crit_sect);
1283 1292
1284 /* turn on board without attaching to the bus */ 1293 /* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1299 1308
1300 /* Wait for SOBS to be unset */ 1309 /* Wait for SOBS to be unset */
1301 wait_for_ctrl_irq (ctrl); 1310 wait_for_ctrl_irq (ctrl);
1302 1311
1303 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1312 adapter_speed = get_adapter_speed(ctrl, hp_slot);
1304 if (ctrl->speed != adapter_speed) 1313 if (ctrl->speed != adapter_speed)
1305 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1314 if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1352 * Get slot won't work for devices behind 1361 * Get slot won't work for devices behind
1353 * bridges, but in this case it will always be 1362 * bridges, but in this case it will always be
1354 * called for the "base" bus/dev/func of an 1363 * called for the "base" bus/dev/func of an
1355 * adapter. */ 1364 * adapter.
1365 */
1356 1366
1357 mutex_lock(&ctrl->crit_sect); 1367 mutex_lock(&ctrl->crit_sect);
1358 1368
@@ -1377,7 +1387,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1377 1387
1378 * Get slot won't work for devices behind bridges, but 1388 * Get slot won't work for devices behind bridges, but
1379 * in this case it will always be called for the "base" 1389 * in this case it will always be called for the "base"
1380 * bus/dev/func of an adapter. */ 1390 * bus/dev/func of an adapter.
1391 */
1381 1392
1382 mutex_lock(&ctrl->crit_sect); 1393 mutex_lock(&ctrl->crit_sect);
1383 1394
@@ -1434,7 +1445,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1434 wait_for_ctrl_irq (ctrl); 1445 wait_for_ctrl_irq (ctrl);
1435 1446
1436 /* Change bits in slot power register to force another shift out 1447 /* Change bits in slot power register to force another shift out
1437 * NOTE: this is to work around the timer bug */ 1448 * NOTE: this is to work around the timer bug
1449 */
1438 temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); 1450 temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
1439 writeb(0x00, ctrl->hpc_reg + SLOT_POWER); 1451 writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
1440 writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); 1452 writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1443 1455
1444 /* Wait for SOBS to be unset */ 1456 /* Wait for SOBS to be unset */
1445 wait_for_ctrl_irq (ctrl); 1457 wait_for_ctrl_irq (ctrl);
1446 1458
1447 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1459 adapter_speed = get_adapter_speed(ctrl, hp_slot);
1448 if (ctrl->speed != adapter_speed) 1460 if (ctrl->speed != adapter_speed)
1449 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1461 if (set_controller_speed(ctrl, adapter_speed, hp_slot))
1450 rc = WRONG_BUS_FREQUENCY; 1462 rc = WRONG_BUS_FREQUENCY;
1451 1463
1452 /* turn off board without attaching to the bus */ 1464 /* turn off board without attaching to the bus */
1453 disable_slot_power (ctrl, hp_slot); 1465 disable_slot_power (ctrl, hp_slot);
1454 1466
@@ -1461,7 +1473,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1461 1473
1462 if (rc) 1474 if (rc)
1463 return rc; 1475 return rc;
1464 1476
1465 p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1477 p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
1466 1478
1467 /* turn on board and blink green LED */ 1479 /* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1521 } 1533 }
1522 1534
1523 /* All F's is an empty slot or an invalid board */ 1535 /* All F's is an empty slot or an invalid board */
1524 if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */ 1536 if (temp_register != 0xFFFFFFFF) {
1525 res_lists.io_head = ctrl->io_head; 1537 res_lists.io_head = ctrl->io_head;
1526 res_lists.mem_head = ctrl->mem_head; 1538 res_lists.mem_head = ctrl->mem_head;
1527 res_lists.p_mem_head = ctrl->p_mem_head; 1539 res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1570 index = 0; 1582 index = 0;
1571 do { 1583 do {
1572 new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++); 1584 new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
1573 if (new_slot && !new_slot->pci_dev) { 1585 if (new_slot && !new_slot->pci_dev)
1574 cpqhp_configure_device(ctrl, new_slot); 1586 cpqhp_configure_device(ctrl, new_slot);
1575 }
1576 } while (new_slot); 1587 } while (new_slot);
1577 1588
1578 mutex_lock(&ctrl->crit_sect); 1589 mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@ static void interrupt_event_handler(struct controller *ctrl)
1859 info(msg_button_on, p_slot->number); 1870 info(msg_button_on, p_slot->number);
1860 } 1871 }
1861 mutex_lock(&ctrl->crit_sect); 1872 mutex_lock(&ctrl->crit_sect);
1862 1873
1863 dbg("blink green LED and turn off amber\n"); 1874 dbg("blink green LED and turn off amber\n");
1864 1875
1865 amber_LED_off (ctrl, hp_slot); 1876 amber_LED_off (ctrl, hp_slot);
1866 green_LED_blink (ctrl, hp_slot); 1877 green_LED_blink (ctrl, hp_slot);
1867 1878
1868 set_SOGO(ctrl); 1879 set_SOGO(ctrl);
1869 1880
1870 /* Wait for SOBS to be unset */ 1881 /* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
1958 if (cpqhp_process_SI(ctrl, func) != 0) { 1969 if (cpqhp_process_SI(ctrl, func) != 0) {
1959 amber_LED_on(ctrl, hp_slot); 1970 amber_LED_on(ctrl, hp_slot);
1960 green_LED_off(ctrl, hp_slot); 1971 green_LED_off(ctrl, hp_slot);
1961 1972
1962 set_SOGO(ctrl); 1973 set_SOGO(ctrl);
1963 1974
1964 /* Wait for SOBS to be unset */ 1975 /* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2079 struct pci_bus *pci_bus = ctrl->pci_bus; 2090 struct pci_bus *pci_bus = ctrl->pci_bus;
2080 int physical_slot=0; 2091 int physical_slot=0;
2081 2092
2082 device = func->device; 2093 device = func->device;
2083 func = cpqhp_slot_find(ctrl->bus, device, index++); 2094 func = cpqhp_slot_find(ctrl->bus, device, index++);
2084 p_slot = cpqhp_find_slot(ctrl, device); 2095 p_slot = cpqhp_find_slot(ctrl, device);
2085 if (p_slot) { 2096 if (p_slot) {
@@ -2113,9 +2124,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2113 2124
2114 /* If the VGA Enable bit is set, remove isn't 2125 /* If the VGA Enable bit is set, remove isn't
2115 * supported */ 2126 * supported */
2116 if (BCR & PCI_BRIDGE_CTL_VGA) { 2127 if (BCR & PCI_BRIDGE_CTL_VGA)
2117 rc = REMOVE_NOT_SUPPORTED; 2128 rc = REMOVE_NOT_SUPPORTED;
2118 }
2119 } 2129 }
2120 } 2130 }
2121 2131
@@ -2183,67 +2193,67 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
2183 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f; 2193 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
2184 2194
2185 switch (test_num) { 2195 switch (test_num) {
2186 case 1: 2196 case 1:
2187 /* Do stuff here! */ 2197 /* Do stuff here! */
2188 2198
2189 /* Do that funky LED thing */ 2199 /* Do that funky LED thing */
2190 /* so we can restore them later */ 2200 /* so we can restore them later */
2191 save_LED = readl(ctrl->hpc_reg + LED_CONTROL); 2201 save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
2192 work_LED = 0x01010101; 2202 work_LED = 0x01010101;
2193 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2203 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2194 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2204 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2195 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2205 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2196 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2206 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2197 2207
2198 work_LED = 0x01010000; 2208 work_LED = 0x01010000;
2199 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2209 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2200 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2210 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2201 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2211 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2202 work_LED = 0x00000101; 2212 work_LED = 0x00000101;
2203 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2213 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2204 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2214 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2205 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2215 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2216
2217 work_LED = 0x01010000;
2218 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2219 for (loop = 0; loop < num_of_slots; loop++) {
2220 set_SOGO(ctrl);
2206 2221
2207 work_LED = 0x01010000; 2222 /* Wait for SOGO interrupt */
2208 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2223 wait_for_ctrl_irq (ctrl);
2209 for (loop = 0; loop < num_of_slots; loop++) {
2210 set_SOGO(ctrl);
2211 2224
2212 /* Wait for SOGO interrupt */ 2225 /* Get ready for next iteration */
2213 wait_for_ctrl_irq (ctrl); 2226 long_delay((3*HZ)/10);
2227 work_LED = work_LED >> 16;
2228 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2214 2229
2215 /* Get ready for next iteration */ 2230 set_SOGO(ctrl);
2216 long_delay((3*HZ)/10);
2217 work_LED = work_LED >> 16;
2218 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2219
2220 set_SOGO(ctrl);
2221 2231
2222 /* Wait for SOGO interrupt */ 2232 /* Wait for SOGO interrupt */
2223 wait_for_ctrl_irq (ctrl); 2233 wait_for_ctrl_irq (ctrl);
2224 2234
2225 /* Get ready for next iteration */ 2235 /* Get ready for next iteration */
2226 long_delay((3*HZ)/10); 2236 long_delay((3*HZ)/10);
2227 work_LED = work_LED << 16; 2237 work_LED = work_LED << 16;
2228 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2238 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2229 work_LED = work_LED << 1; 2239 work_LED = work_LED << 1;
2230 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2240 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2231 } 2241 }
2232 2242
2233 /* put it back the way it was */ 2243 /* put it back the way it was */
2234 writel(save_LED, ctrl->hpc_reg + LED_CONTROL); 2244 writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
2235 2245
2236 set_SOGO(ctrl); 2246 set_SOGO(ctrl);
2237 2247
2238 /* Wait for SOBS to be unset */ 2248 /* Wait for SOBS to be unset */
2239 wait_for_ctrl_irq (ctrl); 2249 wait_for_ctrl_irq (ctrl);
2240 break; 2250 break;
2241 case 2: 2251 case 2:
2242 /* Do other stuff here! */ 2252 /* Do other stuff here! */
2243 break; 2253 break;
2244 case 3: 2254 case 3:
2245 /* and more... */ 2255 /* and more... */
2246 break; 2256 break;
2247 } 2257 }
2248 return 0; 2258 return 0;
2249} 2259}
@@ -2312,9 +2322,9 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
2312 while ((function < max_functions) && (!stop_it)) { 2322 while ((function < max_functions) && (!stop_it)) {
2313 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); 2323 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
2314 2324
2315 if (ID == 0xFFFFFFFF) { /* There's nothing there. */ 2325 if (ID == 0xFFFFFFFF) {
2316 function++; 2326 function++;
2317 } else { /* There's something there */ 2327 } else {
2318 /* Setup slot structure. */ 2328 /* Setup slot structure. */
2319 new_slot = cpqhp_slot_create(func->bus); 2329 new_slot = cpqhp_slot_create(func->bus);
2320 2330
@@ -2339,8 +2349,8 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
2339 2349
2340 2350
2341/* 2351/*
2342 Configuration logic that involves the hotplug data structures and 2352 * Configuration logic that involves the hotplug data structures and
2343 their bookkeeping 2353 * their bookkeeping
2344 */ 2354 */
2345 2355
2346 2356
@@ -2393,7 +2403,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2393 if (rc) 2403 if (rc)
2394 return rc; 2404 return rc;
2395 2405
2396 if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */ 2406 if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
2397 /* set Primary bus */ 2407 /* set Primary bus */
2398 dbg("set Primary bus = %d\n", func->bus); 2408 dbg("set Primary bus = %d\n", func->bus);
2399 rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus); 2409 rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2484 temp_resources.irqs = &irqs; 2494 temp_resources.irqs = &irqs;
2485 2495
2486 /* Make copies of the nodes we are going to pass down so that 2496 /* Make copies of the nodes we are going to pass down so that
2487 * if there is a problem,we can just use these to free resources */ 2497 * if there is a problem,we can just use these to free resources
2498 */
2488 hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); 2499 hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
2489 hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); 2500 hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
2490 hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); 2501 hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2556 temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; 2567 temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
2557 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); 2568 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
2558 2569
2559 /* Adjust this to compensate for extra adjustment in first loop */ 2570 /* Adjust this to compensate for extra adjustment in first loop
2571 */
2560 irqs.barber_pole--; 2572 irqs.barber_pole--;
2561 2573
2562 rc = 0; 2574 rc = 0;
@@ -2917,27 +2929,26 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2917 } /* End of base register loop */ 2929 } /* End of base register loop */
2918 if (cpqhp_legacy_mode) { 2930 if (cpqhp_legacy_mode) {
2919 /* Figure out which interrupt pin this function uses */ 2931 /* Figure out which interrupt pin this function uses */
2920 rc = pci_bus_read_config_byte (pci_bus, devfn, 2932 rc = pci_bus_read_config_byte (pci_bus, devfn,
2921 PCI_INTERRUPT_PIN, &temp_byte); 2933 PCI_INTERRUPT_PIN, &temp_byte);
2922 2934
2923 /* If this function needs an interrupt and we are behind 2935 /* If this function needs an interrupt and we are behind
2924 * a bridge and the pin is tied to something that's 2936 * a bridge and the pin is tied to something that's
2925 * alread mapped, set this one the same */ 2937 * alread mapped, set this one the same */
2926 if (temp_byte && resources->irqs && 2938 if (temp_byte && resources->irqs &&
2927 (resources->irqs->valid_INT & 2939 (resources->irqs->valid_INT &
2928 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { 2940 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
2929 /* We have to share with something already set up */ 2941 /* We have to share with something already set up */
2930 IRQ = resources->irqs->interrupt[(temp_byte + 2942 IRQ = resources->irqs->interrupt[(temp_byte +
2931 resources->irqs->barber_pole - 1) & 0x03]; 2943 resources->irqs->barber_pole - 1) & 0x03];
2932 } else { 2944 } else {
2933 /* Program IRQ based on card type */ 2945 /* Program IRQ based on card type */
2934 rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); 2946 rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
2935 2947
2936 if (class_code == PCI_BASE_CLASS_STORAGE) { 2948 if (class_code == PCI_BASE_CLASS_STORAGE)
2937 IRQ = cpqhp_disk_irq; 2949 IRQ = cpqhp_disk_irq;
2938 } else { 2950 else
2939 IRQ = cpqhp_nic_irq; 2951 IRQ = cpqhp_nic_irq;
2940 }
2941 } 2952 }
2942 2953
2943 /* IRQ Line */ 2954 /* IRQ Line */
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb174888002b..76ba8a1c774d 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@ static u8 evbuffer[1024];
94 94
95static void __iomem *compaq_int15_entry_point; 95static void __iomem *compaq_int15_entry_point;
96 96
97static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */ 97/* lock for ordering int15_bios_call() */
98static spinlock_t int15_lock;
98 99
99 100
100/* This is a series of function that deals with 101/* This is a series of function that deals with
101 setting & getting the hotplug resource table in some environment variable. 102 * setting & getting the hotplug resource table in some environment variable.
102*/ 103 */
103 104
104/* 105/*
105 * We really shouldn't be doing this unless there is a _very_ good reason to!!! 106 * We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
113 114
114 if ((*used + 1) > *avail) 115 if ((*used + 1) > *avail)
115 return(1); 116 return(1);
116 117
117 *((u8*)*p_buffer) = value; 118 *((u8*)*p_buffer) = value;
118 tByte = (u8**)p_buffer; 119 tByte = (u8**)p_buffer;
119 (*tByte)++; 120 (*tByte)++;
@@ -170,10 +171,10 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
170 unsigned long flags; 171 unsigned long flags;
171 int op = operation; 172 int op = operation;
172 int ret_val; 173 int ret_val;
173 174
174 if (!compaq_int15_entry_point) 175 if (!compaq_int15_entry_point)
175 return -ENODEV; 176 return -ENODEV;
176 177
177 spin_lock_irqsave(&int15_lock, flags); 178 spin_lock_irqsave(&int15_lock, flags);
178 __asm__ ( 179 __asm__ (
179 "xorl %%ebx,%%ebx\n" \ 180 "xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
187 "D" (buffer), "m" (compaq_int15_entry_point) 188 "D" (buffer), "m" (compaq_int15_entry_point)
188 : "%ebx", "%edx"); 189 : "%ebx", "%edx");
189 spin_unlock_irqrestore(&int15_lock, flags); 190 spin_unlock_irqrestore(&int15_lock, flags);
190 191
191 return((ret_val & 0xFF00) >> 8); 192 return((ret_val & 0xFF00) >> 8);
192} 193}
193 194
@@ -210,14 +211,16 @@ static int load_HRT (void __iomem *rom_start)
210 211
211 available = 1024; 212 available = 1024;
212 213
213 // Now load the EV 214 /* Now load the EV */
214 temp_dword = available; 215 temp_dword = available;
215 216
216 rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword); 217 rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
217 218
218 evbuffer_length = temp_dword; 219 evbuffer_length = temp_dword;
219 220
220 // We're maintaining the resource lists so write FF to invalidate old info 221 /* We're maintaining the resource lists so write FF to invalidate old
222 * info
223 */
221 temp_dword = 1; 224 temp_dword = 1;
222 225
223 rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword); 226 rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@ static u32 store_HRT (void __iomem *rom_start)
263 p_EV_header = (struct ev_hrt_header *) pFill; 266 p_EV_header = (struct ev_hrt_header *) pFill;
264 267
265 ctrl = cpqhp_ctrl_list; 268 ctrl = cpqhp_ctrl_list;
266 269
267 // The revision of this structure 270 /* The revision of this structure */
268 rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available); 271 rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
269 if (rc) 272 if (rc)
270 return(rc); 273 return(rc);
271 274
272 // The number of controllers 275 /* The number of controllers */
273 rc = add_byte( &pFill, 1, &usedbytes, &available); 276 rc = add_byte( &pFill, 1, &usedbytes, &available);
274 if (rc) 277 if (rc)
275 return(rc); 278 return(rc);
@@ -279,27 +282,27 @@ static u32 store_HRT (void __iomem *rom_start)
279 282
280 numCtrl++; 283 numCtrl++;
281 284
282 // The bus number 285 /* The bus number */
283 rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available); 286 rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
284 if (rc) 287 if (rc)
285 return(rc); 288 return(rc);
286 289
287 // The device Number 290 /* The device Number */
288 rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available); 291 rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
289 if (rc) 292 if (rc)
290 return(rc); 293 return(rc);
291 294
292 // The function Number 295 /* The function Number */
293 rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available); 296 rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
294 if (rc) 297 if (rc)
295 return(rc); 298 return(rc);
296 299
297 // Skip the number of available entries 300 /* Skip the number of available entries */
298 rc = add_dword( &pFill, 0, &usedbytes, &available); 301 rc = add_dword( &pFill, 0, &usedbytes, &available);
299 if (rc) 302 if (rc)
300 return(rc); 303 return(rc);
301 304
302 // Figure out memory Available 305 /* Figure out memory Available */
303 306
304 resNode = ctrl->mem_head; 307 resNode = ctrl->mem_head;
305 308
@@ -308,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
308 while (resNode) { 311 while (resNode) {
309 loop ++; 312 loop ++;
310 313
311 // base 314 /* base */
312 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 315 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
313 if (rc) 316 if (rc)
314 return(rc); 317 return(rc);
315 318
316 // length 319 /* length */
317 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 320 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
318 if (rc) 321 if (rc)
319 return(rc); 322 return(rc);
@@ -321,10 +324,10 @@ static u32 store_HRT (void __iomem *rom_start)
321 resNode = resNode->next; 324 resNode = resNode->next;
322 } 325 }
323 326
324 // Fill in the number of entries 327 /* Fill in the number of entries */
325 p_ev_ctrl->mem_avail = loop; 328 p_ev_ctrl->mem_avail = loop;
326 329
327 // Figure out prefetchable memory Available 330 /* Figure out prefetchable memory Available */
328 331
329 resNode = ctrl->p_mem_head; 332 resNode = ctrl->p_mem_head;
330 333
@@ -333,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
333 while (resNode) { 336 while (resNode) {
334 loop ++; 337 loop ++;
335 338
336 // base 339 /* base */
337 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 340 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
338 if (rc) 341 if (rc)
339 return(rc); 342 return(rc);
340 343
341 // length 344 /* length */
342 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 345 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
343 if (rc) 346 if (rc)
344 return(rc); 347 return(rc);
@@ -346,10 +349,10 @@ static u32 store_HRT (void __iomem *rom_start)
346 resNode = resNode->next; 349 resNode = resNode->next;
347 } 350 }
348 351
349 // Fill in the number of entries 352 /* Fill in the number of entries */
350 p_ev_ctrl->p_mem_avail = loop; 353 p_ev_ctrl->p_mem_avail = loop;
351 354
352 // Figure out IO Available 355 /* Figure out IO Available */
353 356
354 resNode = ctrl->io_head; 357 resNode = ctrl->io_head;
355 358
@@ -358,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
358 while (resNode) { 361 while (resNode) {
359 loop ++; 362 loop ++;
360 363
361 // base 364 /* base */
362 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 365 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
363 if (rc) 366 if (rc)
364 return(rc); 367 return(rc);
365 368
366 // length 369 /* length */
367 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 370 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
368 if (rc) 371 if (rc)
369 return(rc); 372 return(rc);
@@ -371,10 +374,10 @@ static u32 store_HRT (void __iomem *rom_start)
371 resNode = resNode->next; 374 resNode = resNode->next;
372 } 375 }
373 376
374 // Fill in the number of entries 377 /* Fill in the number of entries */
375 p_ev_ctrl->io_avail = loop; 378 p_ev_ctrl->io_avail = loop;
376 379
377 // Figure out bus Available 380 /* Figure out bus Available */
378 381
379 resNode = ctrl->bus_head; 382 resNode = ctrl->bus_head;
380 383
@@ -383,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
383 while (resNode) { 386 while (resNode) {
384 loop ++; 387 loop ++;
385 388
386 // base 389 /* base */
387 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 390 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
388 if (rc) 391 if (rc)
389 return(rc); 392 return(rc);
390 393
391 // length 394 /* length */
392 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 395 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
393 if (rc) 396 if (rc)
394 return(rc); 397 return(rc);
@@ -396,15 +399,15 @@ static u32 store_HRT (void __iomem *rom_start)
396 resNode = resNode->next; 399 resNode = resNode->next;
397 } 400 }
398 401
399 // Fill in the number of entries 402 /* Fill in the number of entries */
400 p_ev_ctrl->bus_avail = loop; 403 p_ev_ctrl->bus_avail = loop;
401 404
402 ctrl = ctrl->next; 405 ctrl = ctrl->next;
403 } 406 }
404 407
405 p_EV_header->num_of_ctrl = numCtrl; 408 p_EV_header->num_of_ctrl = numCtrl;
406 409
407 // Now store the EV 410 /* Now store the EV */
408 411
409 temp_dword = usedbytes; 412 temp_dword = usedbytes;
410 413
@@ -449,20 +452,21 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
449 struct ev_hrt_header *p_EV_header; 452 struct ev_hrt_header *p_EV_header;
450 453
451 if (!evbuffer_init) { 454 if (!evbuffer_init) {
452 // Read the resource list information in from NVRAM 455 /* Read the resource list information in from NVRAM */
453 if (load_HRT(rom_start)) 456 if (load_HRT(rom_start))
454 memset (evbuffer, 0, 1024); 457 memset (evbuffer, 0, 1024);
455 458
456 evbuffer_init = 1; 459 evbuffer_init = 1;
457 } 460 }
458 461
459 // If we saved information in NVRAM, use it now 462 /* If we saved information in NVRAM, use it now */
460 p_EV_header = (struct ev_hrt_header *) evbuffer; 463 p_EV_header = (struct ev_hrt_header *) evbuffer;
461 464
462 // The following code is for systems where version 1.0 of this 465 /* The following code is for systems where version 1.0 of this
463 // driver has been loaded, but doesn't support the hardware. 466 * driver has been loaded, but doesn't support the hardware.
464 // In that case, the driver would incorrectly store something 467 * In that case, the driver would incorrectly store something
465 // in NVRAM. 468 * in NVRAM.
469 */
466 if ((p_EV_header->Version == 2) || 470 if ((p_EV_header->Version == 2) ||
467 ((p_EV_header->Version == 1) && !ctrl->push_flag)) { 471 ((p_EV_header->Version == 1) && !ctrl->push_flag)) {
468 p_byte = &(p_EV_header->next); 472 p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
479 function = p_ev_ctrl->function; 483 function = p_ev_ctrl->function;
480 484
481 while ((bus != ctrl->bus) || 485 while ((bus != ctrl->bus) ||
482 (device != PCI_SLOT(ctrl->pci_dev->devfn)) || 486 (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
483 (function != PCI_FUNC(ctrl->pci_dev->devfn))) { 487 (function != PCI_FUNC(ctrl->pci_dev->devfn))) {
484 nummem = p_ev_ctrl->mem_avail; 488 nummem = p_ev_ctrl->mem_avail;
485 numpmem = p_ev_ctrl->p_mem_avail; 489 numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
491 if (p_byte > ((u8*)p_EV_header + evbuffer_length)) 495 if (p_byte > ((u8*)p_EV_header + evbuffer_length))
492 return 2; 496 return 2;
493 497
494 // Skip forward to the next entry 498 /* Skip forward to the next entry */
495 p_byte += (nummem + numpmem + numio + numbus) * 8; 499 p_byte += (nummem + numpmem + numio + numbus) * 8;
496 500
497 if (p_byte > ((u8*)p_EV_header + evbuffer_length)) 501 if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
629 ctrl->bus_head = bus_node; 633 ctrl->bus_head = bus_node;
630 } 634 }
631 635
632 // If all of the following fail, we don't have any resources for 636 /* If all of the following fail, we don't have any resources for
633 // hot plug add 637 * hot plug add
638 */
634 rc = 1; 639 rc = 1;
635 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); 640 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
636 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); 641 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
640 if (rc) 645 if (rc)
641 return(rc); 646 return(rc);
642 } else { 647 } else {
643 if ((evbuffer[0] != 0) && (!ctrl->push_flag)) 648 if ((evbuffer[0] != 0) && (!ctrl->push_flag))
644 return 1; 649 return 1;
645 } 650 }
646 651
647 return 0; 652 return 0;
648} 653}
649 654
650 655
651int compaq_nvram_store (void __iomem *rom_start) 656int compaq_nvram_store (void __iomem *rom_start)
652{ 657{
653 int rc = 1; 658 int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0fcb8ee..6173b9a4544e 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
37#include "../pci.h" 37#include "../pci.h"
38#include "cpqphp.h" 38#include "cpqphp.h"
39#include "cpqphp_nvram.h" 39#include "cpqphp_nvram.h"
40#include <asm/pci_x86.h>
41 40
42 41
43u8 cpqhp_nic_irq; 42u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
82} 81}
83 82
84 83
85int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) 84int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
86{ 85{
87 unsigned char bus; 86 unsigned char bus;
88 struct pci_bus *child; 87 struct pci_bus *child;
89 int num; 88 int num;
90 89
91 if (func->pci_dev == NULL) 90 if (func->pci_dev == NULL)
92 func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function)); 91 func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function));
93 92
94 /* No pci device, we need to create it then */ 93 /* No pci device, we need to create it then */
95 if (func->pci_dev == NULL) { 94 if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
99 if (num) 98 if (num)
100 pci_bus_add_devices(ctrl->pci_dev->bus); 99 pci_bus_add_devices(ctrl->pci_dev->bus);
101 100
102 func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function)); 101 func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
103 if (func->pci_dev == NULL) { 102 if (func->pci_dev == NULL) {
104 dbg("ERROR: pci_dev still null\n"); 103 dbg("ERROR: pci_dev still null\n");
105 return 0; 104 return 0;
@@ -112,20 +111,24 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
112 pci_do_scan_bus(child); 111 pci_do_scan_bus(child);
113 } 112 }
114 113
114 pci_dev_put(func->pci_dev);
115
115 return 0; 116 return 0;
116} 117}
117 118
118 119
119int cpqhp_unconfigure_device(struct pci_func* func) 120int cpqhp_unconfigure_device(struct pci_func* func)
120{ 121{
121 int j; 122 int j;
122 123
123 dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); 124 dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
124 125
125 for (j=0; j<8 ; j++) { 126 for (j=0; j<8 ; j++) {
126 struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j)); 127 struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
127 if (temp) 128 if (temp) {
129 pci_dev_put(temp);
128 pci_remove_bus_device(temp); 130 pci_remove_bus_device(temp);
131 }
129 } 132 }
130 return 0; 133 return 0;
131} 134}
@@ -178,32 +181,22 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
178 if (!rc) 181 if (!rc)
179 return !rc; 182 return !rc;
180 183
181 // set the Edge Level Control Register (ELCR) 184 /* set the Edge Level Control Register (ELCR) */
182 temp_word = inb(0x4d0); 185 temp_word = inb(0x4d0);
183 temp_word |= inb(0x4d1) << 8; 186 temp_word |= inb(0x4d1) << 8;
184 187
185 temp_word |= 0x01 << irq_num; 188 temp_word |= 0x01 << irq_num;
186 189
187 // This should only be for x86 as it sets the Edge Level Control Register 190 /* This should only be for x86 as it sets the Edge Level
188 outb((u8) (temp_word & 0xFF), 0x4d0); 191 * Control Register
189 outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1); 192 */
190 rc = 0; 193 outb((u8) (temp_word & 0xFF), 0x4d0); outb((u8) ((temp_word &
191 } 194 0xFF00) >> 8), 0x4d1); rc = 0; }
192 195
193 return rc; 196 return rc;
194} 197}
195 198
196 199
197/*
198 * WTF??? This function isn't in the code, yet a function calls it, but the
199 * compiler optimizes it away? strange. Here as a placeholder to keep the
200 * compiler happy.
201 */
202static int PCI_ScanBusNonBridge (u8 bus, u8 device)
203{
204 return 0;
205}
206
207static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num) 200static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
208{ 201{
209 u16 tdevice; 202 u16 tdevice;
@@ -213,11 +206,11 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
213 ctrl->pci_bus->number = bus_num; 206 ctrl->pci_bus->number = bus_num;
214 207
215 for (tdevice = 0; tdevice < 0xFF; tdevice++) { 208 for (tdevice = 0; tdevice < 0xFF; tdevice++) {
216 //Scan for access first 209 /* Scan for access first */
217 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) 210 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
218 continue; 211 continue;
219 dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); 212 dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
220 //Yep we got one. Not a bridge ? 213 /* Yep we got one. Not a bridge ? */
221 if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) { 214 if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
222 *dev_num = tdevice; 215 *dev_num = tdevice;
223 dbg("found it !\n"); 216 dbg("found it !\n");
@@ -225,16 +218,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
225 } 218 }
226 } 219 }
227 for (tdevice = 0; tdevice < 0xFF; tdevice++) { 220 for (tdevice = 0; tdevice < 0xFF; tdevice++) {
228 //Scan for access first 221 /* Scan for access first */
229 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) 222 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
230 continue; 223 continue;
231 dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); 224 dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
232 //Yep we got one. bridge ? 225 /* Yep we got one. bridge ? */
233 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { 226 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
234 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); 227 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
228 /* XXX: no recursion, wtf? */
235 dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); 229 dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
236 if (PCI_ScanBusNonBridge(tbus, tdevice) == 0) 230 return 0;
237 return 0;
238 } 231 }
239 } 232 }
240 233
@@ -244,39 +237,23 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
244 237
245static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge) 238static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
246{ 239{
247 struct irq_routing_table *PCIIRQRoutingInfoLength; 240 int loop, len;
248 long len;
249 long loop;
250 u32 work; 241 u32 work;
251
252 u8 tbus, tdevice, tslot; 242 u8 tbus, tdevice, tslot;
253 243
254 PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table(); 244 len = cpqhp_routing_table_length();
255 if (!PCIIRQRoutingInfoLength)
256 return -1;
257
258 len = (PCIIRQRoutingInfoLength->size -
259 sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
260 // Make sure I got at least one entry
261 if (len == 0) {
262 kfree(PCIIRQRoutingInfoLength );
263 return -1;
264 }
265
266 for (loop = 0; loop < len; ++loop) { 245 for (loop = 0; loop < len; ++loop) {
267 tbus = PCIIRQRoutingInfoLength->slots[loop].bus; 246 tbus = cpqhp_routing_table->slots[loop].bus;
268 tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn; 247 tdevice = cpqhp_routing_table->slots[loop].devfn;
269 tslot = PCIIRQRoutingInfoLength->slots[loop].slot; 248 tslot = cpqhp_routing_table->slots[loop].slot;
270 249
271 if (tslot == slot) { 250 if (tslot == slot) {
272 *bus_num = tbus; 251 *bus_num = tbus;
273 *dev_num = tdevice; 252 *dev_num = tdevice;
274 ctrl->pci_bus->number = tbus; 253 ctrl->pci_bus->number = tbus;
275 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); 254 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
276 if (!nobridge || (work == 0xffffffff)) { 255 if (!nobridge || (work == 0xffffffff))
277 kfree(PCIIRQRoutingInfoLength );
278 return 0; 256 return 0;
279 }
280 257
281 dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); 258 dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
282 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work); 259 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
287 dbg("Scan bus for Non Bridge: bus %d\n", tbus); 264 dbg("Scan bus for Non Bridge: bus %d\n", tbus);
288 if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) { 265 if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
289 *bus_num = tbus; 266 *bus_num = tbus;
290 kfree(PCIIRQRoutingInfoLength );
291 return 0; 267 return 0;
292 } 268 }
293 } else { 269 } else
294 kfree(PCIIRQRoutingInfoLength );
295 return 0; 270 return 0;
296 }
297
298 } 271 }
299 } 272 }
300 kfree(PCIIRQRoutingInfoLength );
301 return -1; 273 return -1;
302} 274}
303 275
304 276
305int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot) 277int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
306{ 278{
307 return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed) 279 /* plain (bridges allowed) */
280 return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
308} 281}
309 282
310 283
311/* More PCI configuration routines; this time centered around hotplug controller */ 284/* More PCI configuration routines; this time centered around hotplug
285 * controller
286 */
312 287
313 288
314/* 289/*
@@ -339,12 +314,12 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
339 int stop_it; 314 int stop_it;
340 int index; 315 int index;
341 316
342 // Decide which slots are supported 317 /* Decide which slots are supported */
343 318
344 if (is_hot_plug) { 319 if (is_hot_plug) {
345 //********************************* 320 /*
346 // is_hot_plug is the slot mask 321 * is_hot_plug is the slot mask
347 //********************************* 322 */
348 FirstSupported = is_hot_plug >> 4; 323 FirstSupported = is_hot_plug >> 4;
349 LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1; 324 LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
350 } else { 325 } else {
@@ -352,123 +327,127 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
352 LastSupported = 0x1F; 327 LastSupported = 0x1F;
353 } 328 }
354 329
355 // Save PCI configuration space for all devices in supported slots 330 /* Save PCI configuration space for all devices in supported slots */
356 ctrl->pci_bus->number = busnumber; 331 ctrl->pci_bus->number = busnumber;
357 for (device = FirstSupported; device <= LastSupported; device++) { 332 for (device = FirstSupported; device <= LastSupported; device++) {
358 ID = 0xFFFFFFFF; 333 ID = 0xFFFFFFFF;
359 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID); 334 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
335
336 if (ID == 0xFFFFFFFF) {
337 if (is_hot_plug) {
338 /* Setup slot structure with entry for empty
339 * slot
340 */
341 new_slot = cpqhp_slot_create(busnumber);
342 if (new_slot == NULL)
343 return 1;
360 344
361 if (ID != 0xFFFFFFFF) { // device in slot 345 new_slot->bus = (u8) busnumber;
362 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code); 346 new_slot->device = (u8) device;
363 if (rc) 347 new_slot->function = 0;
364 return rc; 348 new_slot->is_a_board = 0;
349 new_slot->presence_save = 0;
350 new_slot->switch_save = 0;
351 }
352 continue;
353 }
365 354
366 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type); 355 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
367 if (rc) 356 if (rc)
368 return rc; 357 return rc;
369 358
370 // If multi-function device, set max_functions to 8 359 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
371 if (header_type & 0x80) 360 if (rc)
372 max_functions = 8; 361 return rc;
373 else
374 max_functions = 1;
375 362
376 function = 0; 363 /* If multi-function device, set max_functions to 8 */
364 if (header_type & 0x80)
365 max_functions = 8;
366 else
367 max_functions = 1;
377 368
378 do { 369 function = 0;
379 DevError = 0;
380 370
381 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge 371 do {
382 // Recurse the subordinate bus 372 DevError = 0;
383 // get the subordinate bus number 373 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
384 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus); 374 /* Recurse the subordinate bus
385 if (rc) { 375 * get the subordinate bus number
376 */
377 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
378 if (rc) {
379 return rc;
380 } else {
381 sub_bus = (int) secondary_bus;
382
383 /* Save secondary bus cfg spc
384 * with this recursive call.
385 */
386 rc = cpqhp_save_config(ctrl, sub_bus, 0);
387 if (rc)
386 return rc; 388 return rc;
387 } else { 389 ctrl->pci_bus->number = busnumber;
388 sub_bus = (int) secondary_bus;
389
390 // Save secondary bus cfg spc
391 // with this recursive call.
392 rc = cpqhp_save_config(ctrl, sub_bus, 0);
393 if (rc)
394 return rc;
395 ctrl->pci_bus->number = busnumber;
396 }
397 } 390 }
391 }
398 392
399 index = 0; 393 index = 0;
394 new_slot = cpqhp_slot_find(busnumber, device, index++);
395 while (new_slot &&
396 (new_slot->function != (u8) function))
400 new_slot = cpqhp_slot_find(busnumber, device, index++); 397 new_slot = cpqhp_slot_find(busnumber, device, index++);
401 while (new_slot &&
402 (new_slot->function != (u8) function))
403 new_slot = cpqhp_slot_find(busnumber, device, index++);
404 398
405 if (!new_slot) { 399 if (!new_slot) {
406 // Setup slot structure. 400 /* Setup slot structure. */
407 new_slot = cpqhp_slot_create(busnumber); 401 new_slot = cpqhp_slot_create(busnumber);
408 402 if (new_slot == NULL)
409 if (new_slot == NULL) 403 return 1;
410 return(1); 404 }
411 }
412
413 new_slot->bus = (u8) busnumber;
414 new_slot->device = (u8) device;
415 new_slot->function = (u8) function;
416 new_slot->is_a_board = 1;
417 new_slot->switch_save = 0x10;
418 // In case of unsupported board
419 new_slot->status = DevError;
420 new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
421
422 for (cloop = 0; cloop < 0x20; cloop++) {
423 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
424 if (rc)
425 return rc;
426 }
427 405
428 function++; 406 new_slot->bus = (u8) busnumber;
407 new_slot->device = (u8) device;
408 new_slot->function = (u8) function;
409 new_slot->is_a_board = 1;
410 new_slot->switch_save = 0x10;
411 /* In case of unsupported board */
412 new_slot->status = DevError;
413 new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
429 414
430 stop_it = 0; 415 for (cloop = 0; cloop < 0x20; cloop++) {
416 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
417 if (rc)
418 return rc;
419 }
431 420
432 // this loop skips to the next present function 421 pci_dev_put(new_slot->pci_dev);
433 // reading in Class Code and Header type.
434 422
435 while ((function < max_functions)&&(!stop_it)) { 423 function++;
436 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
437 if (ID == 0xFFFFFFFF) { // nothing there.
438 function++;
439 } else { // Something there
440 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
441 if (rc)
442 return rc;
443 424
444 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type); 425 stop_it = 0;
445 if (rc)
446 return rc;
447 426
448 stop_it++; 427 /* this loop skips to the next present function
449 } 428 * reading in Class Code and Header type.
429 */
430 while ((function < max_functions) && (!stop_it)) {
431 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
432 if (ID == 0xFFFFFFFF) {
433 function++;
434 continue;
450 } 435 }
436 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
437 if (rc)
438 return rc;
451 439
452 } while (function < max_functions); 440 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
453 } // End of IF (device in slot?) 441 if (rc)
454 else if (is_hot_plug) { 442 return rc;
455 // Setup slot structure with entry for empty slot
456 new_slot = cpqhp_slot_create(busnumber);
457 443
458 if (new_slot == NULL) { 444 stop_it++;
459 return(1);
460 } 445 }
461 446
462 new_slot->bus = (u8) busnumber; 447 } while (function < max_functions);
463 new_slot->device = (u8) device; 448 } /* End of FOR loop */
464 new_slot->function = 0;
465 new_slot->is_a_board = 0;
466 new_slot->presence_save = 0;
467 new_slot->switch_save = 0;
468 }
469 } // End of FOR loop
470 449
471 return(0); 450 return 0;
472} 451}
473 452
474 453
@@ -489,7 +468,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
489 u8 secondary_bus; 468 u8 secondary_bus;
490 int sub_bus; 469 int sub_bus;
491 int max_functions; 470 int max_functions;
492 int function; 471 int function = 0;
493 int cloop = 0; 472 int cloop = 0;
494 int stop_it; 473 int stop_it;
495 474
@@ -498,63 +477,58 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
498 ctrl->pci_bus->number = new_slot->bus; 477 ctrl->pci_bus->number = new_slot->bus;
499 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID); 478 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
500 479
501 if (ID != 0xFFFFFFFF) { // device in slot 480 if (ID == 0xFFFFFFFF)
502 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code); 481 return 2;
503 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
504
505 if (header_type & 0x80) // Multi-function device
506 max_functions = 8;
507 else
508 max_functions = 1;
509
510 function = 0;
511
512 do {
513 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
514 // Recurse the subordinate bus
515 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
516 482
517 sub_bus = (int) secondary_bus; 483 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
484 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
518 485
519 // Save the config headers for the secondary bus. 486 if (header_type & 0x80) /* Multi-function device */
520 rc = cpqhp_save_config(ctrl, sub_bus, 0); 487 max_functions = 8;
521 if (rc) 488 else
522 return(rc); 489 max_functions = 1;
523 ctrl->pci_bus->number = new_slot->bus;
524 490
525 } // End of IF 491 while (function < max_functions) {
492 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
493 /* Recurse the subordinate bus */
494 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
526 495
527 new_slot->status = 0; 496 sub_bus = (int) secondary_bus;
528 497
529 for (cloop = 0; cloop < 0x20; cloop++) { 498 /* Save the config headers for the secondary
530 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop])); 499 * bus.
531 } 500 */
501 rc = cpqhp_save_config(ctrl, sub_bus, 0);
502 if (rc)
503 return(rc);
504 ctrl->pci_bus->number = new_slot->bus;
532 505
533 function++; 506 }
534 507
535 stop_it = 0; 508 new_slot->status = 0;
536 509
537 // this loop skips to the next present function 510 for (cloop = 0; cloop < 0x20; cloop++)
538 // reading in the Class Code and the Header type. 511 pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
539 512
540 while ((function < max_functions) && (!stop_it)) { 513 function++;
541 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
542 514
543 if (ID == 0xFFFFFFFF) { // nothing there. 515 stop_it = 0;
544 function++;
545 } else { // Something there
546 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
547 516
548 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type); 517 /* this loop skips to the next present function
518 * reading in the Class Code and the Header type.
519 */
520 while ((function < max_functions) && (!stop_it)) {
521 pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
549 522
550 stop_it++; 523 if (ID == 0xFFFFFFFF)
551 } 524 function++;
525 else {
526 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
527 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
528 stop_it++;
552 } 529 }
530 }
553 531
554 } while (function < max_functions);
555 } // End of IF (device in slot?)
556 else {
557 return 2;
558 } 532 }
559 533
560 return 0; 534 return 0;
@@ -590,11 +564,10 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
590 pci_bus->number = func->bus; 564 pci_bus->number = func->bus;
591 devfn = PCI_DEVFN(func->device, func->function); 565 devfn = PCI_DEVFN(func->device, func->function);
592 566
593 // Check for Bridge 567 /* Check for Bridge */
594 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 568 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
595 569
596 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { 570 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
597 // PCI-PCI Bridge
598 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 571 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
599 572
600 sub_bus = (int) secondary_bus; 573 sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
610 } 583 }
611 pci_bus->number = func->bus; 584 pci_bus->number = func->bus;
612 585
613 //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together 586 /* FIXME: this loop is duplicated in the non-bridge
614 // Figure out IO and memory base lengths 587 * case. The two could be rolled together Figure out
588 * IO and memory base lengths
589 */
615 for (cloop = 0x10; cloop <= 0x14; cloop += 4) { 590 for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
616 temp_register = 0xFFFFFFFF; 591 temp_register = 0xFFFFFFFF;
617 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 592 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
618 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 593 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
619 594 /* If this register is implemented */
620 if (base) { // If this register is implemented 595 if (base) {
621 if (base & 0x01L) { 596 if (base & 0x01L) {
622 // IO base 597 /* IO base
623 // set base = amount of IO space requested 598 * set base = amount of IO space
599 * requested
600 */
624 base = base & 0xFFFFFFFE; 601 base = base & 0xFFFFFFFE;
625 base = (~base) + 1; 602 base = (~base) + 1;
626 603
627 type = 1; 604 type = 1;
628 } else { 605 } else {
629 // memory base 606 /* memory base */
630 base = base & 0xFFFFFFF0; 607 base = base & 0xFFFFFFF0;
631 base = (~base) + 1; 608 base = (~base) + 1;
632 609
@@ -637,32 +614,36 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
637 type = 0; 614 type = 0;
638 } 615 }
639 616
640 // Save information in slot structure 617 /* Save information in slot structure */
641 func->base_length[(cloop - 0x10) >> 2] = 618 func->base_length[(cloop - 0x10) >> 2] =
642 base; 619 base;
643 func->base_type[(cloop - 0x10) >> 2] = type; 620 func->base_type[(cloop - 0x10) >> 2] = type;
644 621
645 } // End of base register loop 622 } /* End of base register loop */
646 623
647 624 } else if ((header_type & 0x7F) == 0x00) {
648 } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge 625 /* Figure out IO and memory base lengths */
649 // Figure out IO and memory base lengths
650 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 626 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
651 temp_register = 0xFFFFFFFF; 627 temp_register = 0xFFFFFFFF;
652 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 628 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
653 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 629 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
654 630
655 if (base) { // If this register is implemented 631 /* If this register is implemented */
632 if (base) {
656 if (base & 0x01L) { 633 if (base & 0x01L) {
657 // IO base 634 /* IO base
658 // base = amount of IO space requested 635 * base = amount of IO space
636 * requested
637 */
659 base = base & 0xFFFFFFFE; 638 base = base & 0xFFFFFFFE;
660 base = (~base) + 1; 639 base = (~base) + 1;
661 640
662 type = 1; 641 type = 1;
663 } else { 642 } else {
664 // memory base 643 /* memory base
665 // base = amount of memory space requested 644 * base = amount of memory
645 * space requested
646 */
666 base = base & 0xFFFFFFF0; 647 base = base & 0xFFFFFFF0;
667 base = (~base) + 1; 648 base = (~base) + 1;
668 649
@@ -673,16 +654,16 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
673 type = 0; 654 type = 0;
674 } 655 }
675 656
676 // Save information in slot structure 657 /* Save information in slot structure */
677 func->base_length[(cloop - 0x10) >> 2] = base; 658 func->base_length[(cloop - 0x10) >> 2] = base;
678 func->base_type[(cloop - 0x10) >> 2] = type; 659 func->base_type[(cloop - 0x10) >> 2] = type;
679 660
680 } // End of base register loop 661 } /* End of base register loop */
681 662
682 } else { // Some other unknown header type 663 } else { /* Some other unknown header type */
683 } 664 }
684 665
685 // find the next device in this slot 666 /* find the next device in this slot */
686 func = cpqhp_slot_find(func->bus, func->device, index++); 667 func = cpqhp_slot_find(func->bus, func->device, index++);
687 } 668 }
688 669
@@ -728,18 +709,18 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
728 pci_bus->number = func->bus; 709 pci_bus->number = func->bus;
729 devfn = PCI_DEVFN(func->device, func->function); 710 devfn = PCI_DEVFN(func->device, func->function);
730 711
731 // Save the command register 712 /* Save the command register */
732 pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command); 713 pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
733 714
734 // disable card 715 /* disable card */
735 command = 0x00; 716 command = 0x00;
736 pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command); 717 pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
737 718
738 // Check for Bridge 719 /* Check for Bridge */
739 pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 720 pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
740 721
741 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 722 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
742 // Clear Bridge Control Register 723 /* Clear Bridge Control Register */
743 command = 0x00; 724 command = 0x00;
744 pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command); 725 pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
745 pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 726 pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
755 bus_node->next = func->bus_head; 736 bus_node->next = func->bus_head;
756 func->bus_head = bus_node; 737 func->bus_head = bus_node;
757 738
758 // Save IO base and Limit registers 739 /* Save IO base and Limit registers */
759 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base); 740 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
760 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length); 741 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
761 742
@@ -771,7 +752,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
771 func->io_head = io_node; 752 func->io_head = io_node;
772 } 753 }
773 754
774 // Save memory base and Limit registers 755 /* Save memory base and Limit registers */
775 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base); 756 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
776 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length); 757 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
777 758
@@ -787,7 +768,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
787 func->mem_head = mem_node; 768 func->mem_head = mem_node;
788 } 769 }
789 770
790 // Save prefetchable memory base and Limit registers 771 /* Save prefetchable memory base and Limit registers */
791 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base); 772 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
792 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length); 773 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
793 774
@@ -802,7 +783,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
802 p_mem_node->next = func->p_mem_head; 783 p_mem_node->next = func->p_mem_head;
803 func->p_mem_head = p_mem_node; 784 func->p_mem_head = p_mem_node;
804 } 785 }
805 // Figure out IO and memory base lengths 786 /* Figure out IO and memory base lengths */
806 for (cloop = 0x10; cloop <= 0x14; cloop += 4) { 787 for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
807 pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base); 788 pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
808 789
@@ -812,11 +793,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
812 793
813 temp_register = base; 794 temp_register = base;
814 795
815 if (base) { // If this register is implemented 796 /* If this register is implemented */
797 if (base) {
816 if (((base & 0x03L) == 0x01) 798 if (((base & 0x03L) == 0x01)
817 && (save_command & 0x01)) { 799 && (save_command & 0x01)) {
818 // IO base 800 /* IO base
819 // set temp_register = amount of IO space requested 801 * set temp_register = amount
802 * of IO space requested
803 */
820 temp_register = base & 0xFFFFFFFE; 804 temp_register = base & 0xFFFFFFFE;
821 temp_register = (~temp_register) + 1; 805 temp_register = (~temp_register) + 1;
822 806
@@ -834,7 +818,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
834 } else 818 } else
835 if (((base & 0x0BL) == 0x08) 819 if (((base & 0x0BL) == 0x08)
836 && (save_command & 0x02)) { 820 && (save_command & 0x02)) {
837 // prefetchable memory base 821 /* prefetchable memory base */
838 temp_register = base & 0xFFFFFFF0; 822 temp_register = base & 0xFFFFFFF0;
839 temp_register = (~temp_register) + 1; 823 temp_register = (~temp_register) + 1;
840 824
@@ -851,7 +835,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
851 } else 835 } else
852 if (((base & 0x0BL) == 0x00) 836 if (((base & 0x0BL) == 0x00)
853 && (save_command & 0x02)) { 837 && (save_command & 0x02)) {
854 // prefetchable memory base 838 /* prefetchable memory base */
855 temp_register = base & 0xFFFFFFF0; 839 temp_register = base & 0xFFFFFFF0;
856 temp_register = (~temp_register) + 1; 840 temp_register = (~temp_register) + 1;
857 841
@@ -868,9 +852,10 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
868 } else 852 } else
869 return(1); 853 return(1);
870 } 854 }
871 } // End of base register loop 855 } /* End of base register loop */
872 } else if ((header_type & 0x7F) == 0x00) { // Standard header 856 /* Standard header */
873 // Figure out IO and memory base lengths 857 } else if ((header_type & 0x7F) == 0x00) {
858 /* Figure out IO and memory base lengths */
874 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 859 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
875 pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base); 860 pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
876 861
@@ -880,11 +865,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
880 865
881 temp_register = base; 866 temp_register = base;
882 867
883 if (base) { // If this register is implemented 868 /* If this register is implemented */
869 if (base) {
884 if (((base & 0x03L) == 0x01) 870 if (((base & 0x03L) == 0x01)
885 && (save_command & 0x01)) { 871 && (save_command & 0x01)) {
886 // IO base 872 /* IO base
887 // set temp_register = amount of IO space requested 873 * set temp_register = amount
874 * of IO space requested
875 */
888 temp_register = base & 0xFFFFFFFE; 876 temp_register = base & 0xFFFFFFFE;
889 temp_register = (~temp_register) + 1; 877 temp_register = (~temp_register) + 1;
890 878
@@ -901,7 +889,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
901 } else 889 } else
902 if (((base & 0x0BL) == 0x08) 890 if (((base & 0x0BL) == 0x08)
903 && (save_command & 0x02)) { 891 && (save_command & 0x02)) {
904 // prefetchable memory base 892 /* prefetchable memory base */
905 temp_register = base & 0xFFFFFFF0; 893 temp_register = base & 0xFFFFFFF0;
906 temp_register = (~temp_register) + 1; 894 temp_register = (~temp_register) + 1;
907 895
@@ -918,7 +906,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
918 } else 906 } else
919 if (((base & 0x0BL) == 0x00) 907 if (((base & 0x0BL) == 0x00)
920 && (save_command & 0x02)) { 908 && (save_command & 0x02)) {
921 // prefetchable memory base 909 /* prefetchable memory base */
922 temp_register = base & 0xFFFFFFF0; 910 temp_register = base & 0xFFFFFFF0;
923 temp_register = (~temp_register) + 1; 911 temp_register = (~temp_register) + 1;
924 912
@@ -935,15 +923,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
935 } else 923 } else
936 return(1); 924 return(1);
937 } 925 }
938 } // End of base register loop 926 } /* End of base register loop */
939 } else { // Some other unknown header type
940 } 927 }
941 928
942 // find the next device in this slot 929 /* find the next device in this slot */
943 func = cpqhp_slot_find(func->bus, func->device, index++); 930 func = cpqhp_slot_find(func->bus, func->device, index++);
944 } 931 }
945 932
946 return(0); 933 return 0;
947} 934}
948 935
949 936
@@ -975,16 +962,16 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
975 pci_bus->number = func->bus; 962 pci_bus->number = func->bus;
976 devfn = PCI_DEVFN(func->device, func->function); 963 devfn = PCI_DEVFN(func->device, func->function);
977 964
978 // Start at the top of config space so that the control 965 /* Start at the top of config space so that the control
979 // registers are programmed last 966 * registers are programmed last
980 for (cloop = 0x3C; cloop > 0; cloop -= 4) { 967 */
968 for (cloop = 0x3C; cloop > 0; cloop -= 4)
981 pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]); 969 pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
982 }
983 970
984 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 971 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
985 972
986 // If this is a bridge device, restore subordinate devices 973 /* If this is a bridge device, restore subordinate devices */
987 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 974 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
988 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 975 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
989 976
990 sub_bus = (int) secondary_bus; 977 sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
1000 } 987 }
1001 } else { 988 } else {
1002 989
1003 // Check all the base Address Registers to make sure 990 /* Check all the base Address Registers to make sure
1004 // they are the same. If not, the board is different. 991 * they are the same. If not, the board is different.
992 */
1005 993
1006 for (cloop = 16; cloop < 40; cloop += 4) { 994 for (cloop = 16; cloop < 40; cloop += 4) {
1007 pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp); 995 pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1058 1046
1059 pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register); 1047 pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
1060 1048
1061 // No adapter present 1049 /* No adapter present */
1062 if (temp_register == 0xFFFFFFFF) 1050 if (temp_register == 0xFFFFFFFF)
1063 return(NO_ADAPTER_PRESENT); 1051 return(NO_ADAPTER_PRESENT);
1064 1052
1065 if (temp_register != func->config_space[0]) 1053 if (temp_register != func->config_space[0])
1066 return(ADAPTER_NOT_SAME); 1054 return(ADAPTER_NOT_SAME);
1067 1055
1068 // Check for same revision number and class code 1056 /* Check for same revision number and class code */
1069 pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register); 1057 pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
1070 1058
1071 // Adapter not the same 1059 /* Adapter not the same */
1072 if (temp_register != func->config_space[0x08 >> 2]) 1060 if (temp_register != func->config_space[0x08 >> 2])
1073 return(ADAPTER_NOT_SAME); 1061 return(ADAPTER_NOT_SAME);
1074 1062
1075 // Check for Bridge 1063 /* Check for Bridge */
1076 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 1064 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
1077 1065
1078 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 1066 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1079 // In order to continue checking, we must program the 1067 /* In order to continue checking, we must program the
1080 // bus registers in the bridge to respond to accesses 1068 * bus registers in the bridge to respond to accesses
1081 // for it's subordinate bus(es) 1069 * for its subordinate bus(es)
1070 */
1082 1071
1083 temp_register = func->config_space[0x18 >> 2]; 1072 temp_register = func->config_space[0x18 >> 2];
1084 pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register); 1073 pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1096 } 1085 }
1097 1086
1098 } 1087 }
1099 // Check to see if it is a standard config header 1088 /* Check to see if it is a standard config header */
1100 else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) { 1089 else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
1101 // Check subsystem vendor and ID 1090 /* Check subsystem vendor and ID */
1102 pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register); 1091 pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
1103 1092
1104 if (temp_register != func->config_space[0x2C >> 2]) { 1093 if (temp_register != func->config_space[0x2C >> 2]) {
1105 // If it's a SMART-2 and the register isn't filled 1094 /* If it's a SMART-2 and the register isn't
1106 // in, ignore the difference because 1095 * filled in, ignore the difference because
1107 // they just have an old rev of the firmware 1096 * they just have an old rev of the firmware
1108 1097 */
1109 if (!((func->config_space[0] == 0xAE100E11) 1098 if (!((func->config_space[0] == 0xAE100E11)
1110 && (temp_register == 0x00L))) 1099 && (temp_register == 0x00L)))
1111 return(ADAPTER_NOT_SAME); 1100 return(ADAPTER_NOT_SAME);
1112 } 1101 }
1113 // Figure out IO and memory base lengths 1102 /* Figure out IO and memory base lengths */
1114 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 1103 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
1115 temp_register = 0xFFFFFFFF; 1104 temp_register = 0xFFFFFFFF;
1116 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 1105 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
1117 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 1106 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
1118 if (base) { // If this register is implemented 1107
1108 /* If this register is implemented */
1109 if (base) {
1119 if (base & 0x01L) { 1110 if (base & 0x01L) {
1120 // IO base 1111 /* IO base
1121 // set base = amount of IO space requested 1112 * set base = amount of IO
1113 * space requested
1114 */
1122 base = base & 0xFFFFFFFE; 1115 base = base & 0xFFFFFFFE;
1123 base = (~base) + 1; 1116 base = (~base) + 1;
1124 1117
1125 type = 1; 1118 type = 1;
1126 } else { 1119 } else {
1127 // memory base 1120 /* memory base */
1128 base = base & 0xFFFFFFF0; 1121 base = base & 0xFFFFFFF0;
1129 base = (~base) + 1; 1122 base = (~base) + 1;
1130 1123
@@ -1135,23 +1128,24 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1135 type = 0; 1128 type = 0;
1136 } 1129 }
1137 1130
1138 // Check information in slot structure 1131 /* Check information in slot structure */
1139 if (func->base_length[(cloop - 0x10) >> 2] != base) 1132 if (func->base_length[(cloop - 0x10) >> 2] != base)
1140 return(ADAPTER_NOT_SAME); 1133 return(ADAPTER_NOT_SAME);
1141 1134
1142 if (func->base_type[(cloop - 0x10) >> 2] != type) 1135 if (func->base_type[(cloop - 0x10) >> 2] != type)
1143 return(ADAPTER_NOT_SAME); 1136 return(ADAPTER_NOT_SAME);
1144 1137
1145 } // End of base register loop 1138 } /* End of base register loop */
1146 1139
1147 } // End of (type 0 config space) else 1140 } /* End of (type 0 config space) else */
1148 else { 1141 else {
1149 // this is not a type 0 or 1 config space header so 1142 /* this is not a type 0 or 1 config space header so
1150 // we don't know how to do it 1143 * we don't know how to do it
1144 */
1151 return(DEVICE_TYPE_NOT_SUPPORTED); 1145 return(DEVICE_TYPE_NOT_SUPPORTED);
1152 } 1146 }
1153 1147
1154 // Get the next function 1148 /* Get the next function */
1155 func = cpqhp_slot_find(func->bus, func->device, index++); 1149 func = cpqhp_slot_find(func->bus, func->device, index++);
1156 } 1150 }
1157 1151
@@ -1168,7 +1162,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1168 * this function is for hot plug ADD! 1162 * this function is for hot plug ADD!
1169 * 1163 *
1170 * returns 0 if success 1164 * returns 0 if success
1171 */ 1165 */
1172int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start) 1166int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
1173{ 1167{
1174 u8 temp; 1168 u8 temp;
@@ -1187,10 +1181,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1187 rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff); 1181 rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
1188 dbg("rom_resource_table = %p\n", rom_resource_table); 1182 dbg("rom_resource_table = %p\n", rom_resource_table);
1189 1183
1190 if (rom_resource_table == NULL) { 1184 if (rom_resource_table == NULL)
1191 return -ENODEV; 1185 return -ENODEV;
1192 } 1186
1193 // Sum all resources and setup resource maps 1187 /* Sum all resources and setup resource maps */
1194 unused_IRQ = readl(rom_resource_table + UNUSED_IRQ); 1188 unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
1195 dbg("unused_IRQ = %x\n", unused_IRQ); 1189 dbg("unused_IRQ = %x\n", unused_IRQ);
1196 1190
@@ -1222,13 +1216,11 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1222 1216
1223 temp = 0; 1217 temp = 0;
1224 1218
1225 if (!cpqhp_nic_irq) { 1219 if (!cpqhp_nic_irq)
1226 cpqhp_nic_irq = ctrl->cfgspc_irq; 1220 cpqhp_nic_irq = ctrl->cfgspc_irq;
1227 }
1228 1221
1229 if (!cpqhp_disk_irq) { 1222 if (!cpqhp_disk_irq)
1230 cpqhp_disk_irq = ctrl->cfgspc_irq; 1223 cpqhp_disk_irq = ctrl->cfgspc_irq;
1231 }
1232 1224
1233 dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq); 1225 dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
1234 1226
@@ -1262,13 +1254,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1262 dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length, 1254 dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
1263 primary_bus, secondary_bus, max_bus); 1255 primary_bus, secondary_bus, max_bus);
1264 1256
1265 // If this entry isn't for our controller's bus, ignore it 1257 /* If this entry isn't for our controller's bus, ignore it */
1266 if (primary_bus != ctrl->bus) { 1258 if (primary_bus != ctrl->bus) {
1267 i--; 1259 i--;
1268 one_slot += sizeof (struct slot_rt); 1260 one_slot += sizeof (struct slot_rt);
1269 continue; 1261 continue;
1270 } 1262 }
1271 // find out if this entry is for an occupied slot 1263 /* find out if this entry is for an occupied slot */
1272 ctrl->pci_bus->number = primary_bus; 1264 ctrl->pci_bus->number = primary_bus;
1273 pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword); 1265 pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
1274 dbg("temp_D_word = %x\n", temp_dword); 1266 dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1282 func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++); 1274 func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
1283 } 1275 }
1284 1276
1285 // If we can't find a match, skip this table entry 1277 /* If we can't find a match, skip this table entry */
1286 if (!func) { 1278 if (!func) {
1287 i--; 1279 i--;
1288 one_slot += sizeof (struct slot_rt); 1280 one_slot += sizeof (struct slot_rt);
1289 continue; 1281 continue;
1290 } 1282 }
1291 // this may not work and shouldn't be used 1283 /* this may not work and shouldn't be used */
1292 if (secondary_bus != primary_bus) 1284 if (secondary_bus != primary_bus)
1293 bridged_slot = 1; 1285 bridged_slot = 1;
1294 else 1286 else
@@ -1301,7 +1293,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1301 } 1293 }
1302 1294
1303 1295
1304 // If we've got a valid IO base, use it 1296 /* If we've got a valid IO base, use it */
1305 1297
1306 temp_dword = io_base + io_length; 1298 temp_dword = io_base + io_length;
1307 1299
@@ -1325,7 +1317,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1325 } 1317 }
1326 } 1318 }
1327 1319
1328 // If we've got a valid memory base, use it 1320 /* If we've got a valid memory base, use it */
1329 temp_dword = mem_base + mem_length; 1321 temp_dword = mem_base + mem_length;
1330 if ((mem_base) && (temp_dword < 0x10000)) { 1322 if ((mem_base) && (temp_dword < 0x10000)) {
1331 mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); 1323 mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1348 } 1340 }
1349 } 1341 }
1350 1342
1351 // If we've got a valid prefetchable memory base, and 1343 /* If we've got a valid prefetchable memory base, and
1352 // the base + length isn't greater than 0xFFFF 1344 * the base + length isn't greater than 0xFFFF
1345 */
1353 temp_dword = pre_mem_base + pre_mem_length; 1346 temp_dword = pre_mem_base + pre_mem_length;
1354 if ((pre_mem_base) && (temp_dword < 0x10000)) { 1347 if ((pre_mem_base) && (temp_dword < 0x10000)) {
1355 p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); 1348 p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1372 } 1365 }
1373 } 1366 }
1374 1367
1375 // If we've got a valid bus number, use it 1368 /* If we've got a valid bus number, use it
1376 // The second condition is to ignore bus numbers on 1369 * The second condition is to ignore bus numbers on
1377 // populated slots that don't have PCI-PCI bridges 1370 * populated slots that don't have PCI-PCI bridges
1371 */
1378 if (secondary_bus && (secondary_bus != primary_bus)) { 1372 if (secondary_bus && (secondary_bus != primary_bus)) {
1379 bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL); 1373 bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
1380 if (!bus_node) 1374 if (!bus_node)
@@ -1398,8 +1392,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1398 one_slot += sizeof (struct slot_rt); 1392 one_slot += sizeof (struct slot_rt);
1399 } 1393 }
1400 1394
1401 // If all of the following fail, we don't have any resources for 1395 /* If all of the following fail, we don't have any resources for
1402 // hot plug add 1396 * hot plug add
1397 */
1403 rc = 1; 1398 rc = 1;
1404 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); 1399 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
1405 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); 1400 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index a13abf55d784..8450f4a6568a 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -225,7 +225,8 @@ void cpqhp_shutdown_debugfs(void)
225 225
226void cpqhp_create_debugfs_files(struct controller *ctrl) 226void cpqhp_create_debugfs_files(struct controller *ctrl)
227{ 227{
228 ctrl->dentry = debugfs_create_file(ctrl->pci_dev->dev.bus_id, S_IRUGO, root, ctrl, &debug_ops); 228 ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev),
229 S_IRUGO, root, ctrl, &debug_ops);
229} 230}
230 231
231void cpqhp_remove_debugfs_files(struct controller *ctrl) 232void cpqhp_remove_debugfs_files(struct controller *ctrl)
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index d8649e127298..6151389fd903 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -1,395 +1,163 @@
1/* 1/* Works like the fakephp driver used to, except a little better.
2 * Fake PCI Hot Plug Controller Driver
3 * 2 *
4 * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> 3 * - It's possible to remove devices with subordinate busses.
5 * Copyright (C) 2003 IBM Corp. 4 * - New PCI devices that appear via any method, not just a fakephp triggered
6 * Copyright (C) 2003 Rolf Eike Beer <eike-kernel@sf-tec.de> 5 * rescan, will be noticed.
6 * - Devices that are removed via any method, not just a fakephp triggered
7 * removal, will also be noticed.
7 * 8 *
8 * Based on ideas and code from: 9 * Uses nothing from the pci-hotplug subsystem.
9 * Vladimir Kondratiev <vladimir.kondratiev@intel.com>
10 * Rolf Eike Beer <eike-kernel@sf-tec.de>
11 * 10 *
12 * All rights reserved.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation, version 2 of the License.
17 *
18 * Send feedback to <greg@kroah.com>
19 */ 11 */
20 12
21/*
22 *
23 * This driver will "emulate" removing PCI devices from the system. If
24 * the "power" file is written to with "0" then the specified PCI device
25 * will be completely removed from the kernel.
26 *
27 * WARNING, this does NOT turn off the power to the PCI device. This is
28 * a "logical" removal, not a physical or electrical removal.
29 *
30 * Use this module at your own risk, you have been warned!
31 *
32 * Enabling PCI devices is left as an exercise for the reader...
33 *
34 */
35#include <linux/kernel.h>
36#include <linux/module.h> 13#include <linux/module.h>
37#include <linux/pci.h> 14#include <linux/kernel.h>
38#include <linux/pci_hotplug.h> 15#include <linux/types.h>
16#include <linux/list.h>
17#include <linux/kobject.h>
18#include <linux/sysfs.h>
39#include <linux/init.h> 19#include <linux/init.h>
40#include <linux/string.h> 20#include <linux/pci.h>
41#include <linux/slab.h> 21#include <linux/device.h>
42#include <linux/workqueue.h>
43#include "../pci.h" 22#include "../pci.h"
44 23
45#if !defined(MODULE) 24struct legacy_slot {
46 #define MY_NAME "fakephp" 25 struct kobject kobj;
47#else 26 struct pci_dev *dev;
48 #define MY_NAME THIS_MODULE->name 27 struct list_head list;
49#endif
50
51#define dbg(format, arg...) \
52 do { \
53 if (debug) \
54 printk(KERN_DEBUG "%s: " format, \
55 MY_NAME , ## arg); \
56 } while (0)
57#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
58#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
59
60#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
61#define DRIVER_DESC "Fake PCI Hot Plug Controller Driver"
62
63struct dummy_slot {
64 struct list_head node;
65 struct hotplug_slot *slot;
66 struct pci_dev *dev;
67 struct work_struct remove_work;
68 unsigned long removed;
69}; 28};
70 29
71static int debug; 30static LIST_HEAD(legacy_list);
72static int dup_slots;
73static LIST_HEAD(slot_list);
74static struct workqueue_struct *dummyphp_wq;
75
76static void pci_rescan_worker(struct work_struct *work);
77static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
78
79static int enable_slot (struct hotplug_slot *slot);
80static int disable_slot (struct hotplug_slot *slot);
81 31
82static struct hotplug_slot_ops dummy_hotplug_slot_ops = { 32static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr,
83 .owner = THIS_MODULE, 33 char *buf)
84 .enable_slot = enable_slot,
85 .disable_slot = disable_slot,
86};
87
88static void dummy_release(struct hotplug_slot *slot)
89{ 34{
90 struct dummy_slot *dslot = slot->private; 35 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
91 36 strcpy(buf, "1\n");
92 list_del(&dslot->node); 37 return 2;
93 kfree(dslot->slot->info);
94 kfree(dslot->slot);
95 pci_dev_put(dslot->dev);
96 kfree(dslot);
97} 38}
98 39
99#define SLOT_NAME_SIZE 8 40static void remove_callback(void *data)
100
101static int add_slot(struct pci_dev *dev)
102{ 41{
103 struct dummy_slot *dslot; 42 pci_remove_bus_device((struct pci_dev *)data);
104 struct hotplug_slot *slot;
105 char name[SLOT_NAME_SIZE];
106 int retval = -ENOMEM;
107 static int count = 1;
108
109 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
110 if (!slot)
111 goto error;
112
113 slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
114 if (!slot->info)
115 goto error_slot;
116
117 slot->info->power_status = 1;
118 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
119 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
120
121 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
122 if (!dslot)
123 goto error_info;
124
125 if (dup_slots)
126 snprintf(name, SLOT_NAME_SIZE, "fake");
127 else
128 snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
129 dbg("slot->name = %s\n", name);
130 slot->ops = &dummy_hotplug_slot_ops;
131 slot->release = &dummy_release;
132 slot->private = dslot;
133
134 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
135 if (retval) {
136 err("pci_hp_register failed with error %d\n", retval);
137 goto error_dslot;
138 }
139
140 dbg("slot->name = %s\n", hotplug_slot_name(slot));
141 dslot->slot = slot;
142 dslot->dev = pci_dev_get(dev);
143 list_add (&dslot->node, &slot_list);
144 return retval;
145
146error_dslot:
147 kfree(dslot);
148error_info:
149 kfree(slot->info);
150error_slot:
151 kfree(slot);
152error:
153 return retval;
154} 43}
155 44
156static int __init pci_scan_buses(void) 45static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr,
46 const char *buf, size_t len)
157{ 47{
158 struct pci_dev *dev = NULL; 48 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
159 int lastslot = 0; 49 unsigned long val;
160 50
161 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 51 if (strict_strtoul(buf, 0, &val) < 0)
162 if (PCI_FUNC(dev->devfn) > 0 && 52 return -EINVAL;
163 lastslot == PCI_SLOT(dev->devfn))
164 continue;
165 lastslot = PCI_SLOT(dev->devfn);
166 add_slot(dev);
167 }
168 53
169 return 0; 54 if (val)
55 pci_rescan_bus(slot->dev->bus);
56 else
57 sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback,
58 slot->dev, THIS_MODULE);
59 return len;
170} 60}
171 61
172static void remove_slot(struct dummy_slot *dslot) 62static struct attribute *legacy_attrs[] = {
173{ 63 &(struct attribute){ .name = "power", .mode = 0644 },
174 int retval; 64 NULL,
175 65};
176 dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
177 retval = pci_hp_deregister(dslot->slot);
178 if (retval)
179 err("Problem unregistering a slot %s\n",
180 hotplug_slot_name(dslot->slot));
181}
182 66
183/* called from the single-threaded workqueue handler to remove a slot */ 67static void legacy_release(struct kobject *kobj)
184static void remove_slot_worker(struct work_struct *work)
185{ 68{
186 struct dummy_slot *dslot = 69 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
187 container_of(work, struct dummy_slot, remove_work);
188 remove_slot(dslot);
189}
190 70
191/** 71 pci_dev_put(slot->dev);
192 * pci_rescan_slot - Rescan slot 72 kfree(slot);
193 * @temp: Device template. Should be set: bus and devfn.
194 *
195 * Tries hard not to re-enable already existing devices;
196 * also handles scanning of subfunctions.
197 */
198static int pci_rescan_slot(struct pci_dev *temp)
199{
200 struct pci_bus *bus = temp->bus;
201 struct pci_dev *dev;
202 int func;
203 u8 hdr_type;
204 int count = 0;
205
206 if (!pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type)) {
207 temp->hdr_type = hdr_type & 0x7f;
208 if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
209 pci_dev_put(dev);
210 else {
211 dev = pci_scan_single_device(bus, temp->devfn);
212 if (dev) {
213 dbg("New device on %s function %x:%x\n",
214 bus->name, temp->devfn >> 3,
215 temp->devfn & 7);
216 count++;
217 }
218 }
219 /* multifunction device? */
220 if (!(hdr_type & 0x80))
221 return count;
222
223 /* continue scanning for other functions */
224 for (func = 1, temp->devfn++; func < 8; func++, temp->devfn++) {
225 if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
226 continue;
227 temp->hdr_type = hdr_type & 0x7f;
228
229 if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
230 pci_dev_put(dev);
231 else {
232 dev = pci_scan_single_device(bus, temp->devfn);
233 if (dev) {
234 dbg("New device on %s function %x:%x\n",
235 bus->name, temp->devfn >> 3,
236 temp->devfn & 7);
237 count++;
238 }
239 }
240 }
241 }
242
243 return count;
244} 73}
245 74
75static struct kobj_type legacy_ktype = {
76 .sysfs_ops = &(struct sysfs_ops){
77 .store = legacy_store, .show = legacy_show
78 },
79 .release = &legacy_release,
80 .default_attrs = legacy_attrs,
81};
246 82
247/** 83static int legacy_add_slot(struct pci_dev *pdev)
248 * pci_rescan_bus - Rescan PCI bus
249 * @bus: the PCI bus to rescan
250 *
251 * Call pci_rescan_slot for each possible function of the bus.
252 */
253static void pci_rescan_bus(const struct pci_bus *bus)
254{ 84{
255 unsigned int devfn; 85 struct legacy_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
256 struct pci_dev *dev;
257 int retval;
258 int found = 0;
259 dev = alloc_pci_dev();
260 if (!dev)
261 return;
262 86
263 dev->bus = (struct pci_bus*)bus; 87 if (!slot)
264 dev->sysdata = bus->sysdata; 88 return -ENOMEM;
265 for (devfn = 0; devfn < 0x100; devfn += 8) {
266 dev->devfn = devfn;
267 found += pci_rescan_slot(dev);
268 }
269
270 if (found) {
271 pci_bus_assign_resources(bus);
272 list_for_each_entry(dev, &bus->devices, bus_list) {
273 /* Skip already-added devices */
274 if (dev->is_added)
275 continue;
276 retval = pci_bus_add_device(dev);
277 if (retval)
278 dev_err(&dev->dev,
279 "Error adding device, continuing\n");
280 else
281 add_slot(dev);
282 }
283 pci_bus_add_devices(bus);
284 }
285 kfree(dev);
286}
287 89
288/* recursively scan all buses */ 90 if (kobject_init_and_add(&slot->kobj, &legacy_ktype,
289static void pci_rescan_buses(const struct list_head *list) 91 &pci_slots_kset->kobj, "%s",
290{ 92 dev_name(&pdev->dev))) {
291 const struct list_head *l; 93 dev_warn(&pdev->dev, "Failed to created legacy fake slot\n");
292 list_for_each(l,list) { 94 return -EINVAL;
293 const struct pci_bus *b = pci_bus_b(l);
294 pci_rescan_bus(b);
295 pci_rescan_buses(&b->children);
296 } 95 }
297} 96 slot->dev = pci_dev_get(pdev);
298 97
299/* initiate rescan of all pci buses */ 98 list_add(&slot->list, &legacy_list);
300static inline void pci_rescan(void) {
301 pci_rescan_buses(&pci_root_buses);
302}
303
304/* called from the single-threaded workqueue handler to rescan all pci buses */
305static void pci_rescan_worker(struct work_struct *work)
306{
307 pci_rescan();
308}
309 99
310static int enable_slot(struct hotplug_slot *hotplug_slot)
311{
312 /* mis-use enable_slot for rescanning of the pci bus */
313 cancel_work_sync(&pci_rescan_work);
314 queue_work(dummyphp_wq, &pci_rescan_work);
315 return 0; 100 return 0;
316} 101}
317 102
318static int disable_slot(struct hotplug_slot *slot) 103static int legacy_notify(struct notifier_block *nb,
104 unsigned long action, void *data)
319{ 105{
320 struct dummy_slot *dslot; 106 struct pci_dev *pdev = to_pci_dev(data);
321 struct pci_dev *dev;
322 int func;
323
324 if (!slot)
325 return -ENODEV;
326 dslot = slot->private;
327
328 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
329 107
330 for (func = 7; func >= 0; func--) { 108 if (action == BUS_NOTIFY_ADD_DEVICE) {
331 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); 109 legacy_add_slot(pdev);
332 if (!dev) 110 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
333 continue; 111 struct legacy_slot *slot;
334 112
335 if (test_and_set_bit(0, &dslot->removed)) { 113 list_for_each_entry(slot, &legacy_list, list)
336 dbg("Slot already scheduled for removal\n"); 114 if (slot->dev == pdev)
337 pci_dev_put(dev); 115 goto found;
338 return -ENODEV;
339 }
340 116
341 /* remove the device from the pci core */ 117 dev_warn(&pdev->dev, "Missing legacy fake slot?");
342 pci_remove_bus_device(dev); 118 return -ENODEV;
343 119found:
344 /* queue work item to blow away this sysfs entry and other 120 kobject_del(&slot->kobj);
345 * parts. 121 list_del(&slot->list);
346 */ 122 kobject_put(&slot->kobj);
347 INIT_WORK(&dslot->remove_work, remove_slot_worker);
348 queue_work(dummyphp_wq, &dslot->remove_work);
349
350 pci_dev_put(dev);
351 } 123 }
124
352 return 0; 125 return 0;
353} 126}
354 127
355static void cleanup_slots (void) 128static struct notifier_block legacy_notifier = {
356{ 129 .notifier_call = legacy_notify
357 struct list_head *tmp; 130};
358 struct list_head *next;
359 struct dummy_slot *dslot;
360
361 destroy_workqueue(dummyphp_wq);
362 list_for_each_safe (tmp, next, &slot_list) {
363 dslot = list_entry (tmp, struct dummy_slot, node);
364 remove_slot(dslot);
365 }
366
367}
368 131
369static int __init dummyphp_init(void) 132static int __init init_legacy(void)
370{ 133{
371 info(DRIVER_DESC "\n"); 134 struct pci_dev *pdev = NULL;
372 135
373 dummyphp_wq = create_singlethread_workqueue(MY_NAME); 136 /* Add existing devices */
374 if (!dummyphp_wq) 137 while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
375 return -ENOMEM; 138 legacy_add_slot(pdev);
376 139
377 return pci_scan_buses(); 140 /* Be alerted of any new ones */
141 bus_register_notifier(&pci_bus_type, &legacy_notifier);
142 return 0;
378} 143}
144module_init(init_legacy);
379 145
380 146static void __exit remove_legacy(void)
381static void __exit dummyphp_exit(void)
382{ 147{
383 cleanup_slots(); 148 struct legacy_slot *slot, *tmp;
149
150 bus_unregister_notifier(&pci_bus_type, &legacy_notifier);
151
152 list_for_each_entry_safe(slot, tmp, &legacy_list, list) {
153 list_del(&slot->list);
154 kobject_del(&slot->kobj);
155 kobject_put(&slot->kobj);
156 }
384} 157}
158module_exit(remove_legacy);
385 159
386module_init(dummyphp_init);
387module_exit(dummyphp_exit);
388 160
389MODULE_AUTHOR(DRIVER_AUTHOR); 161MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>");
390MODULE_DESCRIPTION(DRIVER_DESC); 162MODULE_DESCRIPTION("Legacy version of the fakephp interface");
391MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
392module_param(debug, bool, S_IRUGO | S_IWUSR);
393MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
394module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
395MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index dd18f857dfb0..7485ffda950c 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -153,45 +153,47 @@ int ibmphp_init_devno(struct slot **cur_slot)
153 return -1; 153 return -1;
154 } 154 }
155 for (loop = 0; loop < len; loop++) { 155 for (loop = 0; loop < len; loop++) {
156 if ((*cur_slot)->number == rtable->slots[loop].slot) { 156 if ((*cur_slot)->number == rtable->slots[loop].slot &&
157 if ((*cur_slot)->bus == rtable->slots[loop].bus) { 157 (*cur_slot)->bus == rtable->slots[loop].bus) {
158 struct io_apic_irq_attr irq_attr;
159
158 (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); 160 (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
159 for (i = 0; i < 4; i++) 161 for (i = 0; i < 4; i++)
160 (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, 162 (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
161 (int) (*cur_slot)->device, i); 163 (int) (*cur_slot)->device, i,
162 164 &irq_attr);
163 debug("(*cur_slot)->irq[0] = %x\n", 165
164 (*cur_slot)->irq[0]); 166 debug("(*cur_slot)->irq[0] = %x\n",
165 debug("(*cur_slot)->irq[1] = %x\n", 167 (*cur_slot)->irq[0]);
166 (*cur_slot)->irq[1]); 168 debug("(*cur_slot)->irq[1] = %x\n",
167 debug("(*cur_slot)->irq[2] = %x\n", 169 (*cur_slot)->irq[1]);
168 (*cur_slot)->irq[2]); 170 debug("(*cur_slot)->irq[2] = %x\n",
169 debug("(*cur_slot)->irq[3] = %x\n", 171 (*cur_slot)->irq[2]);
170 (*cur_slot)->irq[3]); 172 debug("(*cur_slot)->irq[3] = %x\n",
171 173 (*cur_slot)->irq[3]);
172 debug("rtable->exlusive_irqs = %x\n", 174
175 debug("rtable->exlusive_irqs = %x\n",
173 rtable->exclusive_irqs); 176 rtable->exclusive_irqs);
174 debug("rtable->slots[loop].irq[0].bitmap = %x\n", 177 debug("rtable->slots[loop].irq[0].bitmap = %x\n",
175 rtable->slots[loop].irq[0].bitmap); 178 rtable->slots[loop].irq[0].bitmap);
176 debug("rtable->slots[loop].irq[1].bitmap = %x\n", 179 debug("rtable->slots[loop].irq[1].bitmap = %x\n",
177 rtable->slots[loop].irq[1].bitmap); 180 rtable->slots[loop].irq[1].bitmap);
178 debug("rtable->slots[loop].irq[2].bitmap = %x\n", 181 debug("rtable->slots[loop].irq[2].bitmap = %x\n",
179 rtable->slots[loop].irq[2].bitmap); 182 rtable->slots[loop].irq[2].bitmap);
180 debug("rtable->slots[loop].irq[3].bitmap = %x\n", 183 debug("rtable->slots[loop].irq[3].bitmap = %x\n",
181 rtable->slots[loop].irq[3].bitmap); 184 rtable->slots[loop].irq[3].bitmap);
182 185
183 debug("rtable->slots[loop].irq[0].link = %x\n", 186 debug("rtable->slots[loop].irq[0].link = %x\n",
184 rtable->slots[loop].irq[0].link); 187 rtable->slots[loop].irq[0].link);
185 debug("rtable->slots[loop].irq[1].link = %x\n", 188 debug("rtable->slots[loop].irq[1].link = %x\n",
186 rtable->slots[loop].irq[1].link); 189 rtable->slots[loop].irq[1].link);
187 debug("rtable->slots[loop].irq[2].link = %x\n", 190 debug("rtable->slots[loop].irq[2].link = %x\n",
188 rtable->slots[loop].irq[2].link); 191 rtable->slots[loop].irq[2].link);
189 debug("rtable->slots[loop].irq[3].link = %x\n", 192 debug("rtable->slots[loop].irq[3].link = %x\n",
190 rtable->slots[loop].irq[3].link); 193 rtable->slots[loop].irq[3].link);
191 debug("end of init_devno\n"); 194 debug("end of init_devno\n");
192 kfree(rtable); 195 kfree(rtable);
193 return 0; 196 return 0;
194 }
195 } 197 }
196 } 198 }
197 199
@@ -1316,7 +1318,6 @@ error:
1316} 1318}
1317 1319
1318struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { 1320struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
1319 .owner = THIS_MODULE,
1320 .set_attention_status = set_attention_status, 1321 .set_attention_status = set_attention_status,
1321 .enable_slot = enable_slot, 1322 .enable_slot = enable_slot,
1322 .disable_slot = ibmphp_disable_slot, 1323 .disable_slot = ibmphp_disable_slot,
@@ -1419,3 +1420,4 @@ static void __exit ibmphp_exit(void)
1419} 1420}
1420 1421
1421module_init(ibmphp_init); 1422module_init(ibmphp_init);
1423module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0f07f9..844580489d4d 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
347 .store = test_write_file 347 .store = test_write_file
348}; 348};
349 349
350static int has_power_file(struct pci_slot *pci_slot) 350static bool has_power_file(struct pci_slot *pci_slot)
351{ 351{
352 struct hotplug_slot *slot = pci_slot->hotplug; 352 struct hotplug_slot *slot = pci_slot->hotplug;
353 if ((!slot) || (!slot->ops)) 353 if ((!slot) || (!slot->ops))
354 return -ENODEV; 354 return false;
355 if ((slot->ops->enable_slot) || 355 if ((slot->ops->enable_slot) ||
356 (slot->ops->disable_slot) || 356 (slot->ops->disable_slot) ||
357 (slot->ops->get_power_status)) 357 (slot->ops->get_power_status))
358 return 0; 358 return true;
359 return -ENOENT; 359 return false;
360} 360}
361 361
362static int has_attention_file(struct pci_slot *pci_slot) 362static bool has_attention_file(struct pci_slot *pci_slot)
363{ 363{
364 struct hotplug_slot *slot = pci_slot->hotplug; 364 struct hotplug_slot *slot = pci_slot->hotplug;
365 if ((!slot) || (!slot->ops)) 365 if ((!slot) || (!slot->ops))
366 return -ENODEV; 366 return false;
367 if ((slot->ops->set_attention_status) || 367 if ((slot->ops->set_attention_status) ||
368 (slot->ops->get_attention_status)) 368 (slot->ops->get_attention_status))
369 return 0; 369 return true;
370 return -ENOENT; 370 return false;
371} 371}
372 372
373static int has_latch_file(struct pci_slot *pci_slot) 373static bool has_latch_file(struct pci_slot *pci_slot)
374{ 374{
375 struct hotplug_slot *slot = pci_slot->hotplug; 375 struct hotplug_slot *slot = pci_slot->hotplug;
376 if ((!slot) || (!slot->ops)) 376 if ((!slot) || (!slot->ops))
377 return -ENODEV; 377 return false;
378 if (slot->ops->get_latch_status) 378 if (slot->ops->get_latch_status)
379 return 0; 379 return true;
380 return -ENOENT; 380 return false;
381} 381}
382 382
383static int has_adapter_file(struct pci_slot *pci_slot) 383static bool has_adapter_file(struct pci_slot *pci_slot)
384{ 384{
385 struct hotplug_slot *slot = pci_slot->hotplug; 385 struct hotplug_slot *slot = pci_slot->hotplug;
386 if ((!slot) || (!slot->ops)) 386 if ((!slot) || (!slot->ops))
387 return -ENODEV; 387 return false;
388 if (slot->ops->get_adapter_status) 388 if (slot->ops->get_adapter_status)
389 return 0; 389 return true;
390 return -ENOENT; 390 return false;
391} 391}
392 392
393static int has_max_bus_speed_file(struct pci_slot *pci_slot) 393static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
394{ 394{
395 struct hotplug_slot *slot = pci_slot->hotplug; 395 struct hotplug_slot *slot = pci_slot->hotplug;
396 if ((!slot) || (!slot->ops)) 396 if ((!slot) || (!slot->ops))
397 return -ENODEV; 397 return false;
398 if (slot->ops->get_max_bus_speed) 398 if (slot->ops->get_max_bus_speed)
399 return 0; 399 return true;
400 return -ENOENT; 400 return false;
401} 401}
402 402
403static int has_cur_bus_speed_file(struct pci_slot *pci_slot) 403static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
404{ 404{
405 struct hotplug_slot *slot = pci_slot->hotplug; 405 struct hotplug_slot *slot = pci_slot->hotplug;
406 if ((!slot) || (!slot->ops)) 406 if ((!slot) || (!slot->ops))
407 return -ENODEV; 407 return false;
408 if (slot->ops->get_cur_bus_speed) 408 if (slot->ops->get_cur_bus_speed)
409 return 0; 409 return true;
410 return -ENOENT; 410 return false;
411} 411}
412 412
413static int has_test_file(struct pci_slot *pci_slot) 413static bool has_test_file(struct pci_slot *pci_slot)
414{ 414{
415 struct hotplug_slot *slot = pci_slot->hotplug; 415 struct hotplug_slot *slot = pci_slot->hotplug;
416 if ((!slot) || (!slot->ops)) 416 if ((!slot) || (!slot->ops))
417 return -ENODEV; 417 return false;
418 if (slot->ops->hardware_test) 418 if (slot->ops->hardware_test)
419 return 0; 419 return true;
420 return -ENOENT; 420 return false;
421} 421}
422 422
423static int fs_add_slot(struct pci_slot *slot) 423static int fs_add_slot(struct pci_slot *slot)
424{ 424{
425 int retval = 0; 425 int retval = 0;
426 426
427 if (has_power_file(slot) == 0) { 427 /* Create symbolic link to the hotplug driver module */
428 retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr); 428 pci_hp_create_module_link(slot);
429
430 if (has_power_file(slot)) {
431 retval = sysfs_create_file(&slot->kobj,
432 &hotplug_slot_attr_power.attr);
429 if (retval) 433 if (retval)
430 goto exit_power; 434 goto exit_power;
431 } 435 }
432 436
433 if (has_attention_file(slot) == 0) { 437 if (has_attention_file(slot)) {
434 retval = sysfs_create_file(&slot->kobj, 438 retval = sysfs_create_file(&slot->kobj,
435 &hotplug_slot_attr_attention.attr); 439 &hotplug_slot_attr_attention.attr);
436 if (retval) 440 if (retval)
437 goto exit_attention; 441 goto exit_attention;
438 } 442 }
439 443
440 if (has_latch_file(slot) == 0) { 444 if (has_latch_file(slot)) {
441 retval = sysfs_create_file(&slot->kobj, 445 retval = sysfs_create_file(&slot->kobj,
442 &hotplug_slot_attr_latch.attr); 446 &hotplug_slot_attr_latch.attr);
443 if (retval) 447 if (retval)
444 goto exit_latch; 448 goto exit_latch;
445 } 449 }
446 450
447 if (has_adapter_file(slot) == 0) { 451 if (has_adapter_file(slot)) {
448 retval = sysfs_create_file(&slot->kobj, 452 retval = sysfs_create_file(&slot->kobj,
449 &hotplug_slot_attr_presence.attr); 453 &hotplug_slot_attr_presence.attr);
450 if (retval) 454 if (retval)
451 goto exit_adapter; 455 goto exit_adapter;
452 } 456 }
453 457
454 if (has_max_bus_speed_file(slot) == 0) { 458 if (has_max_bus_speed_file(slot)) {
455 retval = sysfs_create_file(&slot->kobj, 459 retval = sysfs_create_file(&slot->kobj,
456 &hotplug_slot_attr_max_bus_speed.attr); 460 &hotplug_slot_attr_max_bus_speed.attr);
457 if (retval) 461 if (retval)
458 goto exit_max_speed; 462 goto exit_max_speed;
459 } 463 }
460 464
461 if (has_cur_bus_speed_file(slot) == 0) { 465 if (has_cur_bus_speed_file(slot)) {
462 retval = sysfs_create_file(&slot->kobj, 466 retval = sysfs_create_file(&slot->kobj,
463 &hotplug_slot_attr_cur_bus_speed.attr); 467 &hotplug_slot_attr_cur_bus_speed.attr);
464 if (retval) 468 if (retval)
465 goto exit_cur_speed; 469 goto exit_cur_speed;
466 } 470 }
467 471
468 if (has_test_file(slot) == 0) { 472 if (has_test_file(slot)) {
469 retval = sysfs_create_file(&slot->kobj, 473 retval = sysfs_create_file(&slot->kobj,
470 &hotplug_slot_attr_test.attr); 474 &hotplug_slot_attr_test.attr);
471 if (retval) 475 if (retval)
@@ -475,55 +479,61 @@ static int fs_add_slot(struct pci_slot *slot)
475 goto exit; 479 goto exit;
476 480
477exit_test: 481exit_test:
478 if (has_cur_bus_speed_file(slot) == 0) 482 if (has_cur_bus_speed_file(slot))
479 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr); 483 sysfs_remove_file(&slot->kobj,
480 484 &hotplug_slot_attr_cur_bus_speed.attr);
481exit_cur_speed: 485exit_cur_speed:
482 if (has_max_bus_speed_file(slot) == 0) 486 if (has_max_bus_speed_file(slot))
483 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 487 sysfs_remove_file(&slot->kobj,
484 488 &hotplug_slot_attr_max_bus_speed.attr);
485exit_max_speed: 489exit_max_speed:
486 if (has_adapter_file(slot) == 0) 490 if (has_adapter_file(slot))
487 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 491 sysfs_remove_file(&slot->kobj,
488 492 &hotplug_slot_attr_presence.attr);
489exit_adapter: 493exit_adapter:
490 if (has_latch_file(slot) == 0) 494 if (has_latch_file(slot))
491 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr); 495 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
492
493exit_latch: 496exit_latch:
494 if (has_attention_file(slot) == 0) 497 if (has_attention_file(slot))
495 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr); 498 sysfs_remove_file(&slot->kobj,
496 499 &hotplug_slot_attr_attention.attr);
497exit_attention: 500exit_attention:
498 if (has_power_file(slot) == 0) 501 if (has_power_file(slot))
499 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 502 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
500exit_power: 503exit_power:
504 pci_hp_remove_module_link(slot);
501exit: 505exit:
502 return retval; 506 return retval;
503} 507}
504 508
505static void fs_remove_slot(struct pci_slot *slot) 509static void fs_remove_slot(struct pci_slot *slot)
506{ 510{
507 if (has_power_file(slot) == 0) 511 if (has_power_file(slot))
508 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 512 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
509 513
510 if (has_attention_file(slot) == 0) 514 if (has_attention_file(slot))
511 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr); 515 sysfs_remove_file(&slot->kobj,
516 &hotplug_slot_attr_attention.attr);
512 517
513 if (has_latch_file(slot) == 0) 518 if (has_latch_file(slot))
514 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr); 519 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
515 520
516 if (has_adapter_file(slot) == 0) 521 if (has_adapter_file(slot))
517 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 522 sysfs_remove_file(&slot->kobj,
523 &hotplug_slot_attr_presence.attr);
518 524
519 if (has_max_bus_speed_file(slot) == 0) 525 if (has_max_bus_speed_file(slot))
520 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 526 sysfs_remove_file(&slot->kobj,
527 &hotplug_slot_attr_max_bus_speed.attr);
521 528
522 if (has_cur_bus_speed_file(slot) == 0) 529 if (has_cur_bus_speed_file(slot))
523 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr); 530 sysfs_remove_file(&slot->kobj,
531 &hotplug_slot_attr_cur_bus_speed.attr);
524 532
525 if (has_test_file(slot) == 0) 533 if (has_test_file(slot))
526 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); 534 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
535
536 pci_hp_remove_module_link(slot);
527} 537}
528 538
529static struct hotplug_slot *get_slot_from_name (const char *name) 539static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
540} 550}
541 551
542/** 552/**
543 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem 553 * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
544 * @bus: bus this slot is on 554 * @bus: bus this slot is on
545 * @slot: pointer to the &struct hotplug_slot to register 555 * @slot: pointer to the &struct hotplug_slot to register
546 * @slot_nr: slot number 556 * @devnr: device number
547 * @name: name registered with kobject core 557 * @name: name registered with kobject core
548 * 558 *
549 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 559 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
551 * 561 *
552 * Returns 0 if successful, anything else for an error. 562 * Returns 0 if successful, anything else for an error.
553 */ 563 */
554int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr, 564int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
555 const char *name) 565 int devnr, const char *name,
566 struct module *owner, const char *mod_name)
556{ 567{
557 int result; 568 int result;
558 struct pci_slot *pci_slot; 569 struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
567 return -EINVAL; 578 return -EINVAL;
568 } 579 }
569 580
570 mutex_lock(&pci_hp_mutex); 581 slot->ops->owner = owner;
582 slot->ops->mod_name = mod_name;
571 583
584 mutex_lock(&pci_hp_mutex);
572 /* 585 /*
573 * No problems if we call this interface from both ACPI_PCI_SLOT 586 * No problems if we call this interface from both ACPI_PCI_SLOT
574 * driver and call it here again. If we've already created the 587 * driver and call it here again. If we've already created the
575 * pci_slot, the interface will simply bump the refcount. 588 * pci_slot, the interface will simply bump the refcount.
576 */ 589 */
577 pci_slot = pci_create_slot(bus, slot_nr, name, slot); 590 pci_slot = pci_create_slot(bus, devnr, name, slot);
578 if (IS_ERR(pci_slot)) { 591 if (IS_ERR(pci_slot)) {
579 result = PTR_ERR(pci_slot); 592 result = PTR_ERR(pci_slot);
580 goto out; 593 goto out;
@@ -684,6 +697,6 @@ MODULE_LICENSE("GPL");
684module_param(debug, bool, 0644); 697module_param(debug, bool, 0644);
685MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 698MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
686 699
687EXPORT_SYMBOL_GPL(pci_hp_register); 700EXPORT_SYMBOL_GPL(__pci_hp_register);
688EXPORT_SYMBOL_GPL(pci_hp_deregister); 701EXPORT_SYMBOL_GPL(pci_hp_deregister);
689EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); 702EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 39ae37589fda..e6cf096498be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,10 +46,10 @@ extern int pciehp_force;
46extern struct workqueue_struct *pciehp_wq; 46extern struct workqueue_struct *pciehp_wq;
47 47
48#define dbg(format, arg...) \ 48#define dbg(format, arg...) \
49 do { \ 49do { \
50 if (pciehp_debug) \ 50 if (pciehp_debug) \
51 printk("%s: " format, MY_NAME , ## arg); \ 51 printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
52 } while (0) 52} while (0)
53#define err(format, arg...) \ 53#define err(format, arg...) \
54 printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 54 printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
55#define info(format, arg...) \ 55#define info(format, arg...) \
@@ -60,7 +60,7 @@ extern struct workqueue_struct *pciehp_wq;
60#define ctrl_dbg(ctrl, format, arg...) \ 60#define ctrl_dbg(ctrl, format, arg...) \
61 do { \ 61 do { \
62 if (pciehp_debug) \ 62 if (pciehp_debug) \
63 dev_printk(, &ctrl->pcie->device, \ 63 dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
64 format, ## arg); \ 64 format, ## arg); \
65 } while (0) 65 } while (0)
66#define ctrl_err(ctrl, format, arg...) \ 66#define ctrl_err(ctrl, format, arg...) \
@@ -81,7 +81,6 @@ struct slot {
81 struct hpc_ops *hpc_ops; 81 struct hpc_ops *hpc_ops;
82 struct hotplug_slot *hotplug_slot; 82 struct hotplug_slot *hotplug_slot;
83 struct list_head slot_list; 83 struct list_head slot_list;
84 unsigned long last_emi_toggle;
85 struct delayed_work work; /* work for button event */ 84 struct delayed_work work; /* work for button event */
86 struct mutex lock; 85 struct mutex lock;
87}; 86};
@@ -108,10 +107,11 @@ struct controller {
108 u32 slot_cap; 107 u32 slot_cap;
109 u8 cap_base; 108 u8 cap_base;
110 struct timer_list poll_timer; 109 struct timer_list poll_timer;
111 int cmd_busy; 110 unsigned int cmd_busy:1;
112 unsigned int no_cmd_complete:1; 111 unsigned int no_cmd_complete:1;
113 unsigned int link_active_reporting:1; 112 unsigned int link_active_reporting:1;
114 unsigned int notification_enabled:1; 113 unsigned int notification_enabled:1;
114 unsigned int power_fault_detected;
115}; 115};
116 116
117#define INT_BUTTON_IGNORE 0 117#define INT_BUTTON_IGNORE 0
@@ -202,8 +202,6 @@ struct hpc_ops {
202 int (*set_attention_status)(struct slot *slot, u8 status); 202 int (*set_attention_status)(struct slot *slot, u8 status);
203 int (*get_latch_status)(struct slot *slot, u8 *status); 203 int (*get_latch_status)(struct slot *slot, u8 *status);
204 int (*get_adapter_status)(struct slot *slot, u8 *status); 204 int (*get_adapter_status)(struct slot *slot, u8 *status);
205 int (*get_emi_status)(struct slot *slot, u8 *status);
206 int (*toggle_emi)(struct slot *slot);
207 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 205 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
208 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 206 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
209 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val); 207 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 438d795f9fe3..96048010e7d9 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -67,37 +67,27 @@ static int __init parse_detect_mode(void)
67 return PCIEHP_DETECT_DEFAULT; 67 return PCIEHP_DETECT_DEFAULT;
68} 68}
69 69
70static struct pcie_port_service_id __initdata port_pci_ids[] = {
71 {
72 .vendor = PCI_ANY_ID,
73 .device = PCI_ANY_ID,
74 .port_type = PCIE_ANY_PORT,
75 .service_type = PCIE_PORT_SERVICE_HP,
76 .driver_data = 0,
77 }, { /* end: all zeroes */ }
78};
79
80static int __initdata dup_slot_id; 70static int __initdata dup_slot_id;
81static int __initdata acpi_slot_detected; 71static int __initdata acpi_slot_detected;
82static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); 72static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
83 73
84/* Dummy driver for dumplicate name detection */ 74/* Dummy driver for dumplicate name detection */
85static int __init dummy_probe(struct pcie_device *dev, 75static int __init dummy_probe(struct pcie_device *dev)
86 const struct pcie_port_service_id *id)
87{ 76{
88 int pos; 77 int pos;
89 u32 slot_cap; 78 u32 slot_cap;
90 struct slot *slot, *tmp; 79 struct slot *slot, *tmp;
91 struct pci_dev *pdev = dev->port; 80 struct pci_dev *pdev = dev->port;
92 struct pci_bus *pbus = pdev->subordinate; 81 struct pci_bus *pbus = pdev->subordinate;
93 if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
94 return -ENOMEM;
95 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ 82 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
96 if (pciehp_get_hp_hw_control_from_firmware(pdev)) 83 if (pciehp_get_hp_hw_control_from_firmware(pdev))
97 return -ENODEV; 84 return -ENODEV;
98 if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP))) 85 if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
99 return -ENODEV; 86 return -ENODEV;
100 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); 87 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
88 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
89 if (!slot)
90 return -ENOMEM;
101 slot->number = slot_cap >> 19; 91 slot->number = slot_cap >> 19;
102 list_for_each_entry(tmp, &dummy_slots, slot_list) { 92 list_for_each_entry(tmp, &dummy_slots, slot_list) {
103 if (tmp->number == slot->number) 93 if (tmp->number == slot->number)
@@ -111,7 +101,8 @@ static int __init dummy_probe(struct pcie_device *dev,
111 101
112static struct pcie_port_service_driver __initdata dummy_driver = { 102static struct pcie_port_service_driver __initdata dummy_driver = {
113 .name = "pciehp_dummy", 103 .name = "pciehp_dummy",
114 .id_table = port_pci_ids, 104 .port_type = PCIE_ANY_PORT,
105 .service = PCIE_PORT_SERVICE_HP,
115 .probe = dummy_probe, 106 .probe = dummy_probe,
116}; 107};
117 108
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 681e3912b821..2317557fdee6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
74 74
75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { 75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
76 .owner = THIS_MODULE,
77 .set_attention_status = set_attention_status, 76 .set_attention_status = set_attention_status,
78 .enable_slot = enable_slot, 77 .enable_slot = enable_slot,
79 .disable_slot = disable_slot, 78 .disable_slot = disable_slot,
@@ -85,99 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
85 .get_cur_bus_speed = get_cur_bus_speed, 84 .get_cur_bus_speed = get_cur_bus_speed,
86}; 85};
87 86
88/*
89 * Check the status of the Electro Mechanical Interlock (EMI)
90 */
91static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
92{
93 struct slot *slot = hotplug_slot->private;
94 return (slot->hpc_ops->get_emi_status(slot, value));
95}
96
97/*
98 * sysfs interface for the Electro Mechanical Interlock (EMI)
99 * 1 == locked, 0 == unlocked
100 */
101static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
102{
103 int retval;
104 u8 value;
105
106 retval = get_lock_status(slot, &value);
107 if (retval)
108 goto lock_read_exit;
109 retval = sprintf (buf, "%d\n", value);
110
111lock_read_exit:
112 return retval;
113}
114
115/*
116 * Change the status of the Electro Mechanical Interlock (EMI)
117 * This is a toggle - in addition there must be at least 1 second
118 * in between toggles.
119 */
120static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
121{
122 struct slot *slot = hotplug_slot->private;
123 int retval;
124 u8 value;
125
126 mutex_lock(&slot->ctrl->crit_sect);
127
128 /* has it been >1 sec since our last toggle? */
129 if ((get_seconds() - slot->last_emi_toggle) < 1) {
130 mutex_unlock(&slot->ctrl->crit_sect);
131 return -EINVAL;
132 }
133
134 /* see what our current state is */
135 retval = get_lock_status(hotplug_slot, &value);
136 if (retval || (value == status))
137 goto set_lock_exit;
138
139 slot->hpc_ops->toggle_emi(slot);
140set_lock_exit:
141 mutex_unlock(&slot->ctrl->crit_sect);
142 return 0;
143}
144
145/*
146 * sysfs interface which allows the user to toggle the Electro Mechanical
147 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
148 */
149static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
150 const char *buf, size_t count)
151{
152 struct slot *slot = hotplug_slot->private;
153 unsigned long llock;
154 u8 lock;
155 int retval = 0;
156
157 llock = simple_strtoul(buf, NULL, 10);
158 lock = (u8)(llock & 0xff);
159
160 switch (lock) {
161 case 0:
162 case 1:
163 retval = set_lock_status(hotplug_slot, lock);
164 break;
165 default:
166 ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
167 lock);
168 retval = -EINVAL;
169 }
170 if (retval)
171 return retval;
172 return count;
173}
174
175static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
176 .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
177 .show = lock_read_file,
178 .store = lock_write_file
179};
180
181/** 87/**
182 * release_slot - free up the memory used by a slot 88 * release_slot - free up the memory used by a slot
183 * @hotplug_slot: slot to free 89 * @hotplug_slot: slot to free
@@ -236,17 +142,6 @@ static int init_slots(struct controller *ctrl)
236 get_attention_status(hotplug_slot, &info->attention_status); 142 get_attention_status(hotplug_slot, &info->attention_status);
237 get_latch_status(hotplug_slot, &info->latch_status); 143 get_latch_status(hotplug_slot, &info->latch_status);
238 get_adapter_status(hotplug_slot, &info->adapter_status); 144 get_adapter_status(hotplug_slot, &info->adapter_status);
239 /* create additional sysfs entries */
240 if (EMI(ctrl)) {
241 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
242 &hotplug_slot_attr_lock.attr);
243 if (retval) {
244 pci_hp_deregister(hotplug_slot);
245 ctrl_err(ctrl, "Cannot create additional sysfs "
246 "entries\n");
247 goto error_info;
248 }
249 }
250 } 145 }
251 146
252 return 0; 147 return 0;
@@ -261,13 +156,8 @@ error:
261static void cleanup_slots(struct controller *ctrl) 156static void cleanup_slots(struct controller *ctrl)
262{ 157{
263 struct slot *slot; 158 struct slot *slot;
264 159 list_for_each_entry(slot, &ctrl->slot_list, slot_list)
265 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
266 if (EMI(ctrl))
267 sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
268 &hotplug_slot_attr_lock.attr);
269 pci_hp_deregister(slot->hotplug_slot); 160 pci_hp_deregister(slot->hotplug_slot);
270 }
271} 161}
272 162
273/* 163/*
@@ -401,7 +291,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
401 return 0; 291 return 0;
402} 292}
403 293
404static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_id *id) 294static int pciehp_probe(struct pcie_device *dev)
405{ 295{
406 int rc; 296 int rc;
407 struct controller *ctrl; 297 struct controller *ctrl;
@@ -475,7 +365,7 @@ static void pciehp_remove (struct pcie_device *dev)
475} 365}
476 366
477#ifdef CONFIG_PM 367#ifdef CONFIG_PM
478static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) 368static int pciehp_suspend (struct pcie_device *dev)
479{ 369{
480 dev_info(&dev->device, "%s ENTRY\n", __func__); 370 dev_info(&dev->device, "%s ENTRY\n", __func__);
481 return 0; 371 return 0;
@@ -503,20 +393,12 @@ static int pciehp_resume (struct pcie_device *dev)
503 } 393 }
504 return 0; 394 return 0;
505} 395}
506#endif 396#endif /* PM */
507
508static struct pcie_port_service_id port_pci_ids[] = { {
509 .vendor = PCI_ANY_ID,
510 .device = PCI_ANY_ID,
511 .port_type = PCIE_ANY_PORT,
512 .service_type = PCIE_PORT_SERVICE_HP,
513 .driver_data = 0,
514 }, { /* end: all zeroes */ }
515};
516 397
517static struct pcie_port_service_driver hpdriver_portdrv = { 398static struct pcie_port_service_driver hpdriver_portdrv = {
518 .name = PCIE_MODULE_NAME, 399 .name = PCIE_MODULE_NAME,
519 .id_table = &port_pci_ids[0], 400 .port_type = PCIE_ANY_PORT,
401 .service = PCIE_PORT_SERVICE_HP,
520 402
521 .probe = pciehp_probe, 403 .probe = pciehp_probe,
522 .remove = pciehp_remove, 404 .remove = pciehp_remove,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7a16c6897bb9..52813257e5bf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@ static int hpc_query_power_fault(struct slot *slot)
422 return !!(slot_status & PCI_EXP_SLTSTA_PFD); 422 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
423} 423}
424 424
425static int hpc_get_emi_status(struct slot *slot, u8 *status)
426{
427 struct controller *ctrl = slot->ctrl;
428 u16 slot_status;
429 int retval;
430
431 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
432 if (retval) {
433 ctrl_err(ctrl, "Cannot check EMI status\n");
434 return retval;
435 }
436 *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
437 return retval;
438}
439
440static int hpc_toggle_emi(struct slot *slot)
441{
442 u16 slot_cmd;
443 u16 cmd_mask;
444 int rc;
445
446 slot_cmd = PCI_EXP_SLTCTL_EIC;
447 cmd_mask = PCI_EXP_SLTCTL_EIC;
448 rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
449 slot->last_emi_toggle = get_seconds();
450
451 return rc;
452}
453
454static int hpc_set_attention_status(struct slot *slot, u8 value) 425static int hpc_set_attention_status(struct slot *slot, u8 value)
455{ 426{
456 struct controller *ctrl = slot->ctrl; 427 struct controller *ctrl = slot->ctrl;
@@ -548,23 +519,21 @@ static int hpc_power_on_slot(struct slot * slot)
548 519
549 slot_cmd = POWER_ON; 520 slot_cmd = POWER_ON;
550 cmd_mask = PCI_EXP_SLTCTL_PCC; 521 cmd_mask = PCI_EXP_SLTCTL_PCC;
551 /* Enable detection that we turned off at slot power-off time */
552 if (!pciehp_poll_mode) { 522 if (!pciehp_poll_mode) {
553 slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 523 /* Enable power fault detection turned off at power off time */
554 PCI_EXP_SLTCTL_PDCE); 524 slot_cmd |= PCI_EXP_SLTCTL_PFDE;
555 cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 525 cmd_mask |= PCI_EXP_SLTCTL_PFDE;
556 PCI_EXP_SLTCTL_PDCE);
557 } 526 }
558 527
559 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 528 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
560
561 if (retval) { 529 if (retval) {
562 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); 530 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
563 return -1; 531 return retval;
564 } 532 }
565 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 533 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
566 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 534 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
567 535
536 ctrl->power_fault_detected = 0;
568 return retval; 537 return retval;
569} 538}
570 539
@@ -621,18 +590,10 @@ static int hpc_power_off_slot(struct slot * slot)
621 590
622 slot_cmd = POWER_OFF; 591 slot_cmd = POWER_OFF;
623 cmd_mask = PCI_EXP_SLTCTL_PCC; 592 cmd_mask = PCI_EXP_SLTCTL_PCC;
624 /*
625 * If we get MRL or presence detect interrupts now, the isr
626 * will notice the sticky power-fault bit too and issue power
627 * indicator change commands. This will lead to an endless loop
628 * of command completions, since the power-fault bit remains on
629 * till the slot is powered on again.
630 */
631 if (!pciehp_poll_mode) { 593 if (!pciehp_poll_mode) {
632 slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 594 /* Disable power fault detection */
633 PCI_EXP_SLTCTL_PDCE); 595 slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
634 cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 596 cmd_mask |= PCI_EXP_SLTCTL_PFDE;
635 PCI_EXP_SLTCTL_PDCE);
636 } 597 }
637 598
638 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 599 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -672,10 +633,11 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
672 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | 633 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
673 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 634 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
674 PCI_EXP_SLTSTA_CC); 635 PCI_EXP_SLTSTA_CC);
636 detected &= ~intr_loc;
675 intr_loc |= detected; 637 intr_loc |= detected;
676 if (!intr_loc) 638 if (!intr_loc)
677 return IRQ_NONE; 639 return IRQ_NONE;
678 if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) { 640 if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
679 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", 641 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
680 __func__); 642 __func__);
681 return IRQ_NONE; 643 return IRQ_NONE;
@@ -709,9 +671,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
709 pciehp_handle_presence_change(p_slot); 671 pciehp_handle_presence_change(p_slot);
710 672
711 /* Check Power Fault Detected */ 673 /* Check Power Fault Detected */
712 if (intr_loc & PCI_EXP_SLTSTA_PFD) 674 if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
675 ctrl->power_fault_detected = 1;
713 pciehp_handle_power_fault(p_slot); 676 pciehp_handle_power_fault(p_slot);
714 677 }
715 return IRQ_HANDLED; 678 return IRQ_HANDLED;
716} 679}
717 680
@@ -882,8 +845,6 @@ static struct hpc_ops pciehp_hpc_ops = {
882 .get_attention_status = hpc_get_attention_status, 845 .get_attention_status = hpc_get_attention_status,
883 .get_latch_status = hpc_get_latch_status, 846 .get_latch_status = hpc_get_latch_status,
884 .get_adapter_status = hpc_get_adapter_status, 847 .get_adapter_status = hpc_get_adapter_status,
885 .get_emi_status = hpc_get_emi_status,
886 .toggle_emi = hpc_toggle_emi,
887 848
888 .get_max_bus_speed = hpc_get_max_lnk_speed, 849 .get_max_bus_speed = hpc_get_max_lnk_speed,
889 .get_cur_bus_speed = hpc_get_cur_lnk_speed, 850 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf9e89f..5175d9b26f0b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
82static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 82static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
83 83
84static struct hotplug_slot_ops skel_hotplug_slot_ops = { 84static struct hotplug_slot_ops skel_hotplug_slot_ops = {
85 .owner = THIS_MODULE,
86 .enable_slot = enable_slot, 85 .enable_slot = enable_slot,
87 .disable_slot = disable_slot, 86 .disable_slot = disable_slot,
88 .set_attention_status = set_attention_status, 87 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a08fdc7..c159223389ec 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
423} 423}
424 424
425struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { 425struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
426 .owner = THIS_MODULE,
427 .enable_slot = enable_slot, 426 .enable_slot = enable_slot,
428 .disable_slot = disable_slot, 427 .disable_slot = disable_slot,
429 .set_attention_status = set_attention_status, 428 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 3eee70928d45..a4494d78e7c2 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@ static int disable_slot(struct hotplug_slot *slot);
83static inline int get_power_status(struct hotplug_slot *slot, u8 *value); 83static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
84 84
85static struct hotplug_slot_ops sn_hotplug_slot_ops = { 85static struct hotplug_slot_ops sn_hotplug_slot_ops = {
86 .owner = THIS_MODULE,
87 .enable_slot = enable_slot, 86 .enable_slot = enable_slot,
88 .disable_slot = disable_slot, 87 .disable_slot = disable_slot,
89 .get_power_status = get_power_status, 88 .get_power_status = get_power_status,
@@ -679,7 +678,7 @@ alloc_err:
679 return rc; 678 return rc;
680} 679}
681 680
682static int sn_pci_hotplug_init(void) 681static int __init sn_pci_hotplug_init(void)
683{ 682{
684 struct pci_bus *pci_bus = NULL; 683 struct pci_bus *pci_bus = NULL;
685 int rc; 684 int rc;
@@ -716,7 +715,7 @@ static int sn_pci_hotplug_init(void)
716 return registered == 1 ? 0 : -ENODEV; 715 return registered == 1 ? 0 : -ENODEV;
717} 716}
718 717
719static void sn_pci_hotplug_exit(void) 718static void __exit sn_pci_hotplug_exit(void)
720{ 719{
721 struct hotplug_slot *bss_hotplug_slot; 720 struct hotplug_slot *bss_hotplug_slot;
722 721
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 6aba0b6cf2e0..974e924ca96d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -48,10 +48,10 @@ extern int shpchp_debug;
48extern struct workqueue_struct *shpchp_wq; 48extern struct workqueue_struct *shpchp_wq;
49 49
50#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
51 do { \ 51do { \
52 if (shpchp_debug) \ 52 if (shpchp_debug) \
53 printk("%s: " format, MY_NAME , ## arg); \ 53 printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
54 } while (0) 54} while (0)
55#define err(format, arg...) \ 55#define err(format, arg...) \
56 printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 56 printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
57#define info(format, arg...) \ 57#define info(format, arg...) \
@@ -62,7 +62,7 @@ extern struct workqueue_struct *shpchp_wq;
62#define ctrl_dbg(ctrl, format, arg...) \ 62#define ctrl_dbg(ctrl, format, arg...) \
63 do { \ 63 do { \
64 if (shpchp_debug) \ 64 if (shpchp_debug) \
65 dev_printk(, &ctrl->pci_dev->dev, \ 65 dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
66 format, ## arg); \ 66 format, ## arg); \
67 } while (0) 67 } while (0)
68#define ctrl_err(ctrl, format, arg...) \ 68#define ctrl_err(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149c2293..8a520a3d0f59 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
69static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 69static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
70 70
71static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { 71static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
72 .owner = THIS_MODULE,
73 .set_attention_status = set_attention_status, 72 .set_attention_status = set_attention_status,
74 .enable_slot = enable_slot, 73 .enable_slot = enable_slot,
75 .disable_slot = disable_slot, 74 .disable_slot = disable_slot,
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 138f161becc0..aa315e52529b 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -137,7 +137,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
137 busnr)) 137 busnr))
138 break; 138 break;
139 } 139 }
140 if (busnr >= end) { 140 if (busnr > end) {
141 ctrl_err(ctrl, 141 ctrl_err(ctrl,
142 "No free bus for hot-added bridge\n"); 142 "No free bus for hot-added bridge\n");
143 pci_dev_put(dev); 143 pci_dev_put(dev);
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index bf7d6ce9bbb3..737a1c44b07a 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -98,6 +98,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
98 int max_irq; 98 int max_irq;
99 int pos; 99 int pos;
100 int irq; 100 int irq;
101 int node;
101 102
102 pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); 103 pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
103 if (!pos) 104 if (!pos)
@@ -125,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
125 cfg->msg.address_lo = 0xffffffff; 126 cfg->msg.address_lo = 0xffffffff;
126 cfg->msg.address_hi = 0xffffffff; 127 cfg->msg.address_hi = 0xffffffff;
127 128
128 irq = create_irq(); 129 node = dev_to_node(&dev->dev);
130 irq = create_irq_nr(0, node);
129 131
130 if (irq <= 0) { 132 if (irq <= 0) {
131 kfree(cfg); 133 kfree(cfg);
@@ -158,6 +160,7 @@ int ht_create_irq(struct pci_dev *dev, int idx)
158 160
159/** 161/**
160 * ht_destroy_irq - destroy an irq created with ht_create_irq 162 * ht_destroy_irq - destroy an irq created with ht_create_irq
163 * @irq: irq to be destroyed
161 * 164 *
162 * This reverses ht_create_irq removing the specified irq from 165 * This reverses ht_create_irq removing the specified irq from
163 * existence. The irq should be free before this happens. 166 * existence. The irq should be free before this happens.
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a90..cd389162735f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
36#include <linux/iova.h> 36#include <linux/iova.h>
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/sysdev.h>
39#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
40#include <asm/iommu.h> 41#include <asm/iommu.h>
41#include "pci.h" 42#include "pci.h"
@@ -55,8 +56,12 @@
55#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 56#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
56 57
57#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 58#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
58#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) 59#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
59#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) 60#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
61
62#ifndef PHYSICAL_PAGE_MASK
63#define PHYSICAL_PAGE_MASK PAGE_MASK
64#endif
60 65
61/* global iommu list, set NULL for ignored DMAR units */ 66/* global iommu list, set NULL for ignored DMAR units */
62static struct intel_iommu **g_iommus; 67static struct intel_iommu **g_iommus;
@@ -164,7 +169,8 @@ static inline void context_clear_entry(struct context_entry *context)
164 * 1: writable 169 * 1: writable
165 * 2-6: reserved 170 * 2-6: reserved
166 * 7: super page 171 * 7: super page
167 * 8-11: available 172 * 8-10: available
173 * 11: snoop behavior
168 * 12-63: Host physcial address 174 * 12-63: Host physcial address
169 */ 175 */
170struct dma_pte { 176struct dma_pte {
@@ -186,6 +192,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte)
186 pte->val |= DMA_PTE_WRITE; 192 pte->val |= DMA_PTE_WRITE;
187} 193}
188 194
195static inline void dma_set_pte_snp(struct dma_pte *pte)
196{
197 pte->val |= DMA_PTE_SNP;
198}
199
189static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) 200static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
190{ 201{
191 pte->val = (pte->val & ~3) | (prot & 3); 202 pte->val = (pte->val & ~3) | (prot & 3);
@@ -231,6 +242,7 @@ struct dmar_domain {
231 int flags; /* flags to find out type of domain */ 242 int flags; /* flags to find out type of domain */
232 243
233 int iommu_coherency;/* indicate coherency of iommu access */ 244 int iommu_coherency;/* indicate coherency of iommu access */
245 int iommu_snooping; /* indicate snooping control feature*/
234 int iommu_count; /* reference count of iommu */ 246 int iommu_count; /* reference count of iommu */
235 spinlock_t iommu_lock; /* protect iommu set in domain */ 247 spinlock_t iommu_lock; /* protect iommu set in domain */
236 u64 max_addr; /* maximum mapped address */ 248 u64 max_addr; /* maximum mapped address */
@@ -240,7 +252,8 @@ struct dmar_domain {
240struct device_domain_info { 252struct device_domain_info {
241 struct list_head link; /* link to domain siblings */ 253 struct list_head link; /* link to domain siblings */
242 struct list_head global; /* link to global list */ 254 struct list_head global; /* link to global list */
243 u8 bus; /* PCI bus numer */ 255 int segment; /* PCI domain */
256 u8 bus; /* PCI bus number */
244 u8 devfn; /* PCI devfn number */ 257 u8 devfn; /* PCI devfn number */
245 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ 258 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
246 struct dmar_domain *domain; /* pointer to domain */ 259 struct dmar_domain *domain; /* pointer to domain */
@@ -421,7 +434,6 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
421 return g_iommus[iommu_id]; 434 return g_iommus[iommu_id];
422} 435}
423 436
424/* "Coherency" capability may be different across iommus */
425static void domain_update_iommu_coherency(struct dmar_domain *domain) 437static void domain_update_iommu_coherency(struct dmar_domain *domain)
426{ 438{
427 int i; 439 int i;
@@ -438,7 +450,30 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
438 } 450 }
439} 451}
440 452
441static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn) 453static void domain_update_iommu_snooping(struct dmar_domain *domain)
454{
455 int i;
456
457 domain->iommu_snooping = 1;
458
459 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
460 for (; i < g_num_of_iommus; ) {
461 if (!ecap_sc_support(g_iommus[i]->ecap)) {
462 domain->iommu_snooping = 0;
463 break;
464 }
465 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
466 }
467}
468
469/* Some capabilities may be different across iommus */
470static void domain_update_iommu_cap(struct dmar_domain *domain)
471{
472 domain_update_iommu_coherency(domain);
473 domain_update_iommu_snooping(domain);
474}
475
476static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
442{ 477{
443 struct dmar_drhd_unit *drhd = NULL; 478 struct dmar_drhd_unit *drhd = NULL;
444 int i; 479 int i;
@@ -446,12 +481,20 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
446 for_each_drhd_unit(drhd) { 481 for_each_drhd_unit(drhd) {
447 if (drhd->ignored) 482 if (drhd->ignored)
448 continue; 483 continue;
484 if (segment != drhd->segment)
485 continue;
449 486
450 for (i = 0; i < drhd->devices_cnt; i++) 487 for (i = 0; i < drhd->devices_cnt; i++) {
451 if (drhd->devices[i] && 488 if (drhd->devices[i] &&
452 drhd->devices[i]->bus->number == bus && 489 drhd->devices[i]->bus->number == bus &&
453 drhd->devices[i]->devfn == devfn) 490 drhd->devices[i]->devfn == devfn)
454 return drhd->iommu; 491 return drhd->iommu;
492 if (drhd->devices[i] &&
493 drhd->devices[i]->subordinate &&
494 drhd->devices[i]->subordinate->number <= bus &&
495 drhd->devices[i]->subordinate->subordinate >= bus)
496 return drhd->iommu;
497 }
455 498
456 if (drhd->include_all) 499 if (drhd->include_all)
457 return drhd->iommu; 500 return drhd->iommu;
@@ -689,15 +732,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
689static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) 732static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
690{ 733{
691 int addr_width = agaw_to_width(domain->agaw); 734 int addr_width = agaw_to_width(domain->agaw);
735 int npages;
692 736
693 start &= (((u64)1) << addr_width) - 1; 737 start &= (((u64)1) << addr_width) - 1;
694 end &= (((u64)1) << addr_width) - 1; 738 end &= (((u64)1) << addr_width) - 1;
695 /* in case it's partial page */ 739 /* in case it's partial page */
696 start = PAGE_ALIGN(start); 740 start &= PAGE_MASK;
697 end &= PAGE_MASK; 741 end = PAGE_ALIGN(end);
742 npages = (end - start) / VTD_PAGE_SIZE;
698 743
699 /* we don't need lock here, nobody else touches the iova range */ 744 /* we don't need lock here, nobody else touches the iova range */
700 while (start < end) { 745 while (npages--) {
701 dma_pte_clear_one(domain, start); 746 dma_pte_clear_one(domain, start);
702 start += VTD_PAGE_SIZE; 747 start += VTD_PAGE_SIZE;
703 } 748 }
@@ -1004,194 +1049,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
1004 return 0; 1049 return 0;
1005} 1050}
1006 1051
1007/* iommu interrupt handling. Most stuff are MSI-like. */
1008
1009static const char *fault_reason_strings[] =
1010{
1011 "Software",
1012 "Present bit in root entry is clear",
1013 "Present bit in context entry is clear",
1014 "Invalid context entry",
1015 "Access beyond MGAW",
1016 "PTE Write access is not set",
1017 "PTE Read access is not set",
1018 "Next page table ptr is invalid",
1019 "Root table address invalid",
1020 "Context table ptr is invalid",
1021 "non-zero reserved fields in RTP",
1022 "non-zero reserved fields in CTP",
1023 "non-zero reserved fields in PTE",
1024};
1025#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
1026
1027const char *dmar_get_fault_reason(u8 fault_reason)
1028{
1029 if (fault_reason > MAX_FAULT_REASON_IDX)
1030 return "Unknown";
1031 else
1032 return fault_reason_strings[fault_reason];
1033}
1034
1035void dmar_msi_unmask(unsigned int irq)
1036{
1037 struct intel_iommu *iommu = get_irq_data(irq);
1038 unsigned long flag;
1039
1040 /* unmask it */
1041 spin_lock_irqsave(&iommu->register_lock, flag);
1042 writel(0, iommu->reg + DMAR_FECTL_REG);
1043 /* Read a reg to force flush the post write */
1044 readl(iommu->reg + DMAR_FECTL_REG);
1045 spin_unlock_irqrestore(&iommu->register_lock, flag);
1046}
1047
1048void dmar_msi_mask(unsigned int irq)
1049{
1050 unsigned long flag;
1051 struct intel_iommu *iommu = get_irq_data(irq);
1052
1053 /* mask it */
1054 spin_lock_irqsave(&iommu->register_lock, flag);
1055 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1056 /* Read a reg to force flush the post write */
1057 readl(iommu->reg + DMAR_FECTL_REG);
1058 spin_unlock_irqrestore(&iommu->register_lock, flag);
1059}
1060
1061void dmar_msi_write(int irq, struct msi_msg *msg)
1062{
1063 struct intel_iommu *iommu = get_irq_data(irq);
1064 unsigned long flag;
1065
1066 spin_lock_irqsave(&iommu->register_lock, flag);
1067 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1068 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1069 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1070 spin_unlock_irqrestore(&iommu->register_lock, flag);
1071}
1072
1073void dmar_msi_read(int irq, struct msi_msg *msg)
1074{
1075 struct intel_iommu *iommu = get_irq_data(irq);
1076 unsigned long flag;
1077
1078 spin_lock_irqsave(&iommu->register_lock, flag);
1079 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1080 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1081 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1082 spin_unlock_irqrestore(&iommu->register_lock, flag);
1083}
1084
1085static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
1086 u8 fault_reason, u16 source_id, unsigned long long addr)
1087{
1088 const char *reason;
1089
1090 reason = dmar_get_fault_reason(fault_reason);
1091
1092 printk(KERN_ERR
1093 "DMAR:[%s] Request device [%02x:%02x.%d] "
1094 "fault addr %llx \n"
1095 "DMAR:[fault reason %02d] %s\n",
1096 (type ? "DMA Read" : "DMA Write"),
1097 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1098 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1099 return 0;
1100}
1101
1102#define PRIMARY_FAULT_REG_LEN (16)
1103static irqreturn_t iommu_page_fault(int irq, void *dev_id)
1104{
1105 struct intel_iommu *iommu = dev_id;
1106 int reg, fault_index;
1107 u32 fault_status;
1108 unsigned long flag;
1109
1110 spin_lock_irqsave(&iommu->register_lock, flag);
1111 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1112
1113 /* TBD: ignore advanced fault log currently */
1114 if (!(fault_status & DMA_FSTS_PPF))
1115 goto clear_overflow;
1116
1117 fault_index = dma_fsts_fault_record_index(fault_status);
1118 reg = cap_fault_reg_offset(iommu->cap);
1119 while (1) {
1120 u8 fault_reason;
1121 u16 source_id;
1122 u64 guest_addr;
1123 int type;
1124 u32 data;
1125
1126 /* highest 32 bits */
1127 data = readl(iommu->reg + reg +
1128 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1129 if (!(data & DMA_FRCD_F))
1130 break;
1131
1132 fault_reason = dma_frcd_fault_reason(data);
1133 type = dma_frcd_type(data);
1134
1135 data = readl(iommu->reg + reg +
1136 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1137 source_id = dma_frcd_source_id(data);
1138
1139 guest_addr = dmar_readq(iommu->reg + reg +
1140 fault_index * PRIMARY_FAULT_REG_LEN);
1141 guest_addr = dma_frcd_page_addr(guest_addr);
1142 /* clear the fault */
1143 writel(DMA_FRCD_F, iommu->reg + reg +
1144 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1145
1146 spin_unlock_irqrestore(&iommu->register_lock, flag);
1147
1148 iommu_page_fault_do_one(iommu, type, fault_reason,
1149 source_id, guest_addr);
1150
1151 fault_index++;
1152 if (fault_index > cap_num_fault_regs(iommu->cap))
1153 fault_index = 0;
1154 spin_lock_irqsave(&iommu->register_lock, flag);
1155 }
1156clear_overflow:
1157 /* clear primary fault overflow */
1158 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1159 if (fault_status & DMA_FSTS_PFO)
1160 writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
1161
1162 spin_unlock_irqrestore(&iommu->register_lock, flag);
1163 return IRQ_HANDLED;
1164}
1165
1166int dmar_set_interrupt(struct intel_iommu *iommu)
1167{
1168 int irq, ret;
1169
1170 irq = create_irq();
1171 if (!irq) {
1172 printk(KERN_ERR "IOMMU: no free vectors\n");
1173 return -EINVAL;
1174 }
1175
1176 set_irq_data(irq, iommu);
1177 iommu->irq = irq;
1178
1179 ret = arch_setup_dmar_msi(irq);
1180 if (ret) {
1181 set_irq_data(irq, NULL);
1182 iommu->irq = 0;
1183 destroy_irq(irq);
1184 return 0;
1185 }
1186
1187 /* Force fault register is cleared */
1188 iommu_page_fault(irq, iommu);
1189
1190 ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
1191 if (ret)
1192 printk(KERN_ERR "IOMMU: can't request irq\n");
1193 return ret;
1194}
1195 1052
1196static int iommu_init_domains(struct intel_iommu *iommu) 1053static int iommu_init_domains(struct intel_iommu *iommu)
1197{ 1054{
@@ -1363,7 +1220,7 @@ static void dmar_init_reserved_ranges(void)
1363 if (!r->flags || !(r->flags & IORESOURCE_MEM)) 1220 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1364 continue; 1221 continue;
1365 addr = r->start; 1222 addr = r->start;
1366 addr &= PAGE_MASK; 1223 addr &= PHYSICAL_PAGE_MASK;
1367 size = r->end - addr; 1224 size = r->end - addr;
1368 size = PAGE_ALIGN(size); 1225 size = PAGE_ALIGN(size);
1369 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), 1226 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
@@ -1429,6 +1286,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1429 else 1286 else
1430 domain->iommu_coherency = 0; 1287 domain->iommu_coherency = 0;
1431 1288
1289 if (ecap_sc_support(iommu->ecap))
1290 domain->iommu_snooping = 1;
1291 else
1292 domain->iommu_snooping = 0;
1293
1432 domain->iommu_count = 1; 1294 domain->iommu_count = 1;
1433 1295
1434 /* always allocate the top pgd */ 1296 /* always allocate the top pgd */
@@ -1464,7 +1326,7 @@ static void domain_exit(struct dmar_domain *domain)
1464} 1326}
1465 1327
1466static int domain_context_mapping_one(struct dmar_domain *domain, 1328static int domain_context_mapping_one(struct dmar_domain *domain,
1467 u8 bus, u8 devfn) 1329 int segment, u8 bus, u8 devfn)
1468{ 1330{
1469 struct context_entry *context; 1331 struct context_entry *context;
1470 unsigned long flags; 1332 unsigned long flags;
@@ -1479,7 +1341,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1479 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1341 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1480 BUG_ON(!domain->pgd); 1342 BUG_ON(!domain->pgd);
1481 1343
1482 iommu = device_to_iommu(bus, devfn); 1344 iommu = device_to_iommu(segment, bus, devfn);
1483 if (!iommu) 1345 if (!iommu)
1484 return -ENODEV; 1346 return -ENODEV;
1485 1347
@@ -1557,7 +1419,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1557 spin_lock_irqsave(&domain->iommu_lock, flags); 1419 spin_lock_irqsave(&domain->iommu_lock, flags);
1558 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { 1420 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1559 domain->iommu_count++; 1421 domain->iommu_count++;
1560 domain_update_iommu_coherency(domain); 1422 domain_update_iommu_cap(domain);
1561 } 1423 }
1562 spin_unlock_irqrestore(&domain->iommu_lock, flags); 1424 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1563 return 0; 1425 return 0;
@@ -1569,8 +1431,8 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1569 int ret; 1431 int ret;
1570 struct pci_dev *tmp, *parent; 1432 struct pci_dev *tmp, *parent;
1571 1433
1572 ret = domain_context_mapping_one(domain, pdev->bus->number, 1434 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1573 pdev->devfn); 1435 pdev->bus->number, pdev->devfn);
1574 if (ret) 1436 if (ret)
1575 return ret; 1437 return ret;
1576 1438
@@ -1581,18 +1443,23 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1581 /* Secondary interface's bus number and devfn 0 */ 1443 /* Secondary interface's bus number and devfn 0 */
1582 parent = pdev->bus->self; 1444 parent = pdev->bus->self;
1583 while (parent != tmp) { 1445 while (parent != tmp) {
1584 ret = domain_context_mapping_one(domain, parent->bus->number, 1446 ret = domain_context_mapping_one(domain,
1585 parent->devfn); 1447 pci_domain_nr(parent->bus),
1448 parent->bus->number,
1449 parent->devfn);
1586 if (ret) 1450 if (ret)
1587 return ret; 1451 return ret;
1588 parent = parent->bus->self; 1452 parent = parent->bus->self;
1589 } 1453 }
1590 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ 1454 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1591 return domain_context_mapping_one(domain, 1455 return domain_context_mapping_one(domain,
1592 tmp->subordinate->number, 0); 1456 pci_domain_nr(tmp->subordinate),
1457 tmp->subordinate->number, 0);
1593 else /* this is a legacy PCI bridge */ 1458 else /* this is a legacy PCI bridge */
1594 return domain_context_mapping_one(domain, 1459 return domain_context_mapping_one(domain,
1595 tmp->bus->number, tmp->devfn); 1460 pci_domain_nr(tmp->bus),
1461 tmp->bus->number,
1462 tmp->devfn);
1596} 1463}
1597 1464
1598static int domain_context_mapped(struct pci_dev *pdev) 1465static int domain_context_mapped(struct pci_dev *pdev)
@@ -1601,12 +1468,12 @@ static int domain_context_mapped(struct pci_dev *pdev)
1601 struct pci_dev *tmp, *parent; 1468 struct pci_dev *tmp, *parent;
1602 struct intel_iommu *iommu; 1469 struct intel_iommu *iommu;
1603 1470
1604 iommu = device_to_iommu(pdev->bus->number, pdev->devfn); 1471 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1472 pdev->devfn);
1605 if (!iommu) 1473 if (!iommu)
1606 return -ENODEV; 1474 return -ENODEV;
1607 1475
1608 ret = device_context_mapped(iommu, 1476 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1609 pdev->bus->number, pdev->devfn);
1610 if (!ret) 1477 if (!ret)
1611 return ret; 1478 return ret;
1612 /* dependent device mapping */ 1479 /* dependent device mapping */
@@ -1617,17 +1484,17 @@ static int domain_context_mapped(struct pci_dev *pdev)
1617 parent = pdev->bus->self; 1484 parent = pdev->bus->self;
1618 while (parent != tmp) { 1485 while (parent != tmp) {
1619 ret = device_context_mapped(iommu, parent->bus->number, 1486 ret = device_context_mapped(iommu, parent->bus->number,
1620 parent->devfn); 1487 parent->devfn);
1621 if (!ret) 1488 if (!ret)
1622 return ret; 1489 return ret;
1623 parent = parent->bus->self; 1490 parent = parent->bus->self;
1624 } 1491 }
1625 if (tmp->is_pcie) 1492 if (tmp->is_pcie)
1626 return device_context_mapped(iommu, 1493 return device_context_mapped(iommu, tmp->subordinate->number,
1627 tmp->subordinate->number, 0); 1494 0);
1628 else 1495 else
1629 return device_context_mapped(iommu, 1496 return device_context_mapped(iommu, tmp->bus->number,
1630 tmp->bus->number, tmp->devfn); 1497 tmp->devfn);
1631} 1498}
1632 1499
1633static int 1500static int
@@ -1657,6 +1524,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1657 BUG_ON(dma_pte_addr(pte)); 1524 BUG_ON(dma_pte_addr(pte));
1658 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); 1525 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1659 dma_set_pte_prot(pte, prot); 1526 dma_set_pte_prot(pte, prot);
1527 if (prot & DMA_PTE_SNP)
1528 dma_set_pte_snp(pte);
1660 domain_flush_cache(domain, pte, sizeof(*pte)); 1529 domain_flush_cache(domain, pte, sizeof(*pte));
1661 start_pfn++; 1530 start_pfn++;
1662 index++; 1531 index++;
@@ -1692,7 +1561,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
1692 info->dev->dev.archdata.iommu = NULL; 1561 info->dev->dev.archdata.iommu = NULL;
1693 spin_unlock_irqrestore(&device_domain_lock, flags); 1562 spin_unlock_irqrestore(&device_domain_lock, flags);
1694 1563
1695 iommu = device_to_iommu(info->bus, info->devfn); 1564 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1696 iommu_detach_dev(iommu, info->bus, info->devfn); 1565 iommu_detach_dev(iommu, info->bus, info->devfn);
1697 free_devinfo_mem(info); 1566 free_devinfo_mem(info);
1698 1567
@@ -1727,11 +1596,14 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1727 struct pci_dev *dev_tmp; 1596 struct pci_dev *dev_tmp;
1728 unsigned long flags; 1597 unsigned long flags;
1729 int bus = 0, devfn = 0; 1598 int bus = 0, devfn = 0;
1599 int segment;
1730 1600
1731 domain = find_domain(pdev); 1601 domain = find_domain(pdev);
1732 if (domain) 1602 if (domain)
1733 return domain; 1603 return domain;
1734 1604
1605 segment = pci_domain_nr(pdev->bus);
1606
1735 dev_tmp = pci_find_upstream_pcie_bridge(pdev); 1607 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1736 if (dev_tmp) { 1608 if (dev_tmp) {
1737 if (dev_tmp->is_pcie) { 1609 if (dev_tmp->is_pcie) {
@@ -1743,7 +1615,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1743 } 1615 }
1744 spin_lock_irqsave(&device_domain_lock, flags); 1616 spin_lock_irqsave(&device_domain_lock, flags);
1745 list_for_each_entry(info, &device_domain_list, global) { 1617 list_for_each_entry(info, &device_domain_list, global) {
1746 if (info->bus == bus && info->devfn == devfn) { 1618 if (info->segment == segment &&
1619 info->bus == bus && info->devfn == devfn) {
1747 found = info->domain; 1620 found = info->domain;
1748 break; 1621 break;
1749 } 1622 }
@@ -1781,6 +1654,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1781 domain_exit(domain); 1654 domain_exit(domain);
1782 goto error; 1655 goto error;
1783 } 1656 }
1657 info->segment = segment;
1784 info->bus = bus; 1658 info->bus = bus;
1785 info->devfn = devfn; 1659 info->devfn = devfn;
1786 info->dev = NULL; 1660 info->dev = NULL;
@@ -1792,7 +1666,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1792 found = NULL; 1666 found = NULL;
1793 spin_lock_irqsave(&device_domain_lock, flags); 1667 spin_lock_irqsave(&device_domain_lock, flags);
1794 list_for_each_entry(tmp, &device_domain_list, global) { 1668 list_for_each_entry(tmp, &device_domain_list, global) {
1795 if (tmp->bus == bus && tmp->devfn == devfn) { 1669 if (tmp->segment == segment &&
1670 tmp->bus == bus && tmp->devfn == devfn) {
1796 found = tmp->domain; 1671 found = tmp->domain;
1797 break; 1672 break;
1798 } 1673 }
@@ -1812,6 +1687,7 @@ found_domain:
1812 info = alloc_devinfo_mem(); 1687 info = alloc_devinfo_mem();
1813 if (!info) 1688 if (!info)
1814 goto error; 1689 goto error;
1690 info->segment = segment;
1815 info->bus = pdev->bus->number; 1691 info->bus = pdev->bus->number;
1816 info->devfn = pdev->devfn; 1692 info->devfn = pdev->devfn;
1817 info->dev = pdev; 1693 info->dev = pdev;
@@ -1970,7 +1846,7 @@ static inline void iommu_prepare_isa(void)
1970 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 1846 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1971 1847
1972 if (ret) 1848 if (ret)
1973 printk("IOMMU: Failed to create 0-64M identity map, " 1849 printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
1974 "floppy might not work\n"); 1850 "floppy might not work\n");
1975 1851
1976} 1852}
@@ -1987,7 +1863,7 @@ static int __init init_dmars(void)
1987 struct dmar_rmrr_unit *rmrr; 1863 struct dmar_rmrr_unit *rmrr;
1988 struct pci_dev *pdev; 1864 struct pci_dev *pdev;
1989 struct intel_iommu *iommu; 1865 struct intel_iommu *iommu;
1990 int i, ret, unit = 0; 1866 int i, ret;
1991 1867
1992 /* 1868 /*
1993 * for each drhd 1869 * for each drhd
@@ -2043,11 +1919,40 @@ static int __init init_dmars(void)
2043 } 1919 }
2044 } 1920 }
2045 1921
1922 /*
1923 * Start from the sane iommu hardware state.
1924 */
2046 for_each_drhd_unit(drhd) { 1925 for_each_drhd_unit(drhd) {
2047 if (drhd->ignored) 1926 if (drhd->ignored)
2048 continue; 1927 continue;
2049 1928
2050 iommu = drhd->iommu; 1929 iommu = drhd->iommu;
1930
1931 /*
1932 * If the queued invalidation is already initialized by us
1933 * (for example, while enabling interrupt-remapping) then
1934 * we got the things already rolling from a sane state.
1935 */
1936 if (iommu->qi)
1937 continue;
1938
1939 /*
1940 * Clear any previous faults.
1941 */
1942 dmar_fault(-1, iommu);
1943 /*
1944 * Disable queued invalidation if supported and already enabled
1945 * before OS handover.
1946 */
1947 dmar_disable_qi(iommu);
1948 }
1949
1950 for_each_drhd_unit(drhd) {
1951 if (drhd->ignored)
1952 continue;
1953
1954 iommu = drhd->iommu;
1955
2051 if (dmar_enable_qi(iommu)) { 1956 if (dmar_enable_qi(iommu)) {
2052 /* 1957 /*
2053 * Queued Invalidate not enabled, use Register Based 1958 * Queued Invalidate not enabled, use Register Based
@@ -2109,7 +2014,6 @@ static int __init init_dmars(void)
2109 if (drhd->ignored) 2014 if (drhd->ignored)
2110 continue; 2015 continue;
2111 iommu = drhd->iommu; 2016 iommu = drhd->iommu;
2112 sprintf (iommu->name, "dmar%d", unit++);
2113 2017
2114 iommu_flush_write_buffer(iommu); 2018 iommu_flush_write_buffer(iommu);
2115 2019
@@ -2171,15 +2075,15 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
2171 struct pci_dev *pdev = to_pci_dev(dev); 2075 struct pci_dev *pdev = to_pci_dev(dev);
2172 struct iova *iova = NULL; 2076 struct iova *iova = NULL;
2173 2077
2174 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) 2078 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
2175 iova = iommu_alloc_iova(domain, size, dma_mask); 2079 iova = iommu_alloc_iova(domain, size, dma_mask);
2176 else { 2080 else {
2177 /* 2081 /*
2178 * First try to allocate an io virtual address in 2082 * First try to allocate an io virtual address in
2179 * DMA_32BIT_MASK and if that fails then try allocating 2083 * DMA_BIT_MASK(32) and if that fails then try allocating
2180 * from higher range 2084 * from higher range
2181 */ 2085 */
2182 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); 2086 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
2183 if (!iova) 2087 if (!iova)
2184 iova = iommu_alloc_iova(domain, size, dma_mask); 2088 iova = iommu_alloc_iova(domain, size, dma_mask);
2185 } 2089 }
@@ -2264,7 +2168,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2264 * is not a big problem 2168 * is not a big problem
2265 */ 2169 */
2266 ret = domain_page_mapping(domain, start_paddr, 2170 ret = domain_page_mapping(domain, start_paddr,
2267 ((u64)paddr) & PAGE_MASK, size, prot); 2171 ((u64)paddr) & PHYSICAL_PAGE_MASK,
2172 size, prot);
2268 if (ret) 2173 if (ret)
2269 goto error; 2174 goto error;
2270 2175
@@ -2279,16 +2184,18 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2279error: 2184error:
2280 if (iova) 2185 if (iova)
2281 __free_iova(&domain->iovad, iova); 2186 __free_iova(&domain->iovad, iova);
2282 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", 2187 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2283 pci_name(pdev), size, (unsigned long long)paddr, dir); 2188 pci_name(pdev), size, (unsigned long long)paddr, dir);
2284 return 0; 2189 return 0;
2285} 2190}
2286 2191
2287dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, 2192static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2288 size_t size, int dir) 2193 unsigned long offset, size_t size,
2194 enum dma_data_direction dir,
2195 struct dma_attrs *attrs)
2289{ 2196{
2290 return __intel_map_single(hwdev, paddr, size, dir, 2197 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2291 to_pci_dev(hwdev)->dma_mask); 2198 dir, to_pci_dev(dev)->dma_mask);
2292} 2199}
2293 2200
2294static void flush_unmaps(void) 2201static void flush_unmaps(void)
@@ -2352,8 +2259,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2352 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 2259 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2353} 2260}
2354 2261
2355void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, 2262static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2356 int dir) 2263 size_t size, enum dma_data_direction dir,
2264 struct dma_attrs *attrs)
2357{ 2265{
2358 struct pci_dev *pdev = to_pci_dev(dev); 2266 struct pci_dev *pdev = to_pci_dev(dev);
2359 struct dmar_domain *domain; 2267 struct dmar_domain *domain;
@@ -2375,7 +2283,7 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2375 start_addr = iova->pfn_lo << PAGE_SHIFT; 2283 start_addr = iova->pfn_lo << PAGE_SHIFT;
2376 size = aligned_size((u64)dev_addr, size); 2284 size = aligned_size((u64)dev_addr, size);
2377 2285
2378 pr_debug("Device %s unmapping: %lx@%llx\n", 2286 pr_debug("Device %s unmapping: %zx@%llx\n",
2379 pci_name(pdev), size, (unsigned long long)start_addr); 2287 pci_name(pdev), size, (unsigned long long)start_addr);
2380 2288
2381 /* clear the whole page */ 2289 /* clear the whole page */
@@ -2397,8 +2305,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2397 } 2305 }
2398} 2306}
2399 2307
2400void *intel_alloc_coherent(struct device *hwdev, size_t size, 2308static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2401 dma_addr_t *dma_handle, gfp_t flags) 2309 int dir)
2310{
2311 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2312}
2313
2314static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2315 dma_addr_t *dma_handle, gfp_t flags)
2402{ 2316{
2403 void *vaddr; 2317 void *vaddr;
2404 int order; 2318 int order;
@@ -2421,8 +2335,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
2421 return NULL; 2335 return NULL;
2422} 2336}
2423 2337
2424void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, 2338static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2425 dma_addr_t dma_handle) 2339 dma_addr_t dma_handle)
2426{ 2340{
2427 int order; 2341 int order;
2428 2342
@@ -2433,10 +2347,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2433 free_pages((unsigned long)vaddr, order); 2347 free_pages((unsigned long)vaddr, order);
2434} 2348}
2435 2349
2436#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) 2350static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2437 2351 int nelems, enum dma_data_direction dir,
2438void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, 2352 struct dma_attrs *attrs)
2439 int nelems, int dir)
2440{ 2353{
2441 int i; 2354 int i;
2442 struct pci_dev *pdev = to_pci_dev(hwdev); 2355 struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2444,7 +2357,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2444 unsigned long start_addr; 2357 unsigned long start_addr;
2445 struct iova *iova; 2358 struct iova *iova;
2446 size_t size = 0; 2359 size_t size = 0;
2447 void *addr; 2360 phys_addr_t addr;
2448 struct scatterlist *sg; 2361 struct scatterlist *sg;
2449 struct intel_iommu *iommu; 2362 struct intel_iommu *iommu;
2450 2363
@@ -2460,7 +2373,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2460 if (!iova) 2373 if (!iova)
2461 return; 2374 return;
2462 for_each_sg(sglist, sg, nelems, i) { 2375 for_each_sg(sglist, sg, nelems, i) {
2463 addr = SG_ENT_VIRT_ADDRESS(sg); 2376 addr = page_to_phys(sg_page(sg)) + sg->offset;
2464 size += aligned_size((u64)addr, sg->length); 2377 size += aligned_size((u64)addr, sg->length);
2465 } 2378 }
2466 2379
@@ -2487,16 +2400,16 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2487 2400
2488 for_each_sg(sglist, sg, nelems, i) { 2401 for_each_sg(sglist, sg, nelems, i) {
2489 BUG_ON(!sg_page(sg)); 2402 BUG_ON(!sg_page(sg));
2490 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg)); 2403 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2491 sg->dma_length = sg->length; 2404 sg->dma_length = sg->length;
2492 } 2405 }
2493 return nelems; 2406 return nelems;
2494} 2407}
2495 2408
2496int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, 2409static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2497 int dir) 2410 enum dma_data_direction dir, struct dma_attrs *attrs)
2498{ 2411{
2499 void *addr; 2412 phys_addr_t addr;
2500 int i; 2413 int i;
2501 struct pci_dev *pdev = to_pci_dev(hwdev); 2414 struct pci_dev *pdev = to_pci_dev(hwdev);
2502 struct dmar_domain *domain; 2415 struct dmar_domain *domain;
@@ -2520,8 +2433,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2520 iommu = domain_get_iommu(domain); 2433 iommu = domain_get_iommu(domain);
2521 2434
2522 for_each_sg(sglist, sg, nelems, i) { 2435 for_each_sg(sglist, sg, nelems, i) {
2523 addr = SG_ENT_VIRT_ADDRESS(sg); 2436 addr = page_to_phys(sg_page(sg)) + sg->offset;
2524 addr = (void *)virt_to_phys(addr);
2525 size += aligned_size((u64)addr, sg->length); 2437 size += aligned_size((u64)addr, sg->length);
2526 } 2438 }
2527 2439
@@ -2544,12 +2456,11 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2544 start_addr = iova->pfn_lo << PAGE_SHIFT; 2456 start_addr = iova->pfn_lo << PAGE_SHIFT;
2545 offset = 0; 2457 offset = 0;
2546 for_each_sg(sglist, sg, nelems, i) { 2458 for_each_sg(sglist, sg, nelems, i) {
2547 addr = SG_ENT_VIRT_ADDRESS(sg); 2459 addr = page_to_phys(sg_page(sg)) + sg->offset;
2548 addr = (void *)virt_to_phys(addr);
2549 size = aligned_size((u64)addr, sg->length); 2460 size = aligned_size((u64)addr, sg->length);
2550 ret = domain_page_mapping(domain, start_addr + offset, 2461 ret = domain_page_mapping(domain, start_addr + offset,
2551 ((u64)addr) & PAGE_MASK, 2462 ((u64)addr) & PHYSICAL_PAGE_MASK,
2552 size, prot); 2463 size, prot);
2553 if (ret) { 2464 if (ret) {
2554 /* clear the page */ 2465 /* clear the page */
2555 dma_pte_clear_range(domain, start_addr, 2466 dma_pte_clear_range(domain, start_addr,
@@ -2574,13 +2485,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2574 return nelems; 2485 return nelems;
2575} 2486}
2576 2487
2577static struct dma_mapping_ops intel_dma_ops = { 2488static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2489{
2490 return !dma_addr;
2491}
2492
2493struct dma_map_ops intel_dma_ops = {
2578 .alloc_coherent = intel_alloc_coherent, 2494 .alloc_coherent = intel_alloc_coherent,
2579 .free_coherent = intel_free_coherent, 2495 .free_coherent = intel_free_coherent,
2580 .map_single = intel_map_single,
2581 .unmap_single = intel_unmap_single,
2582 .map_sg = intel_map_sg, 2496 .map_sg = intel_map_sg,
2583 .unmap_sg = intel_unmap_sg, 2497 .unmap_sg = intel_unmap_sg,
2498 .map_page = intel_map_page,
2499 .unmap_page = intel_unmap_page,
2500 .mapping_error = intel_mapping_error,
2584}; 2501};
2585 2502
2586static inline int iommu_domain_cache_init(void) 2503static inline int iommu_domain_cache_init(void)
@@ -2707,6 +2624,150 @@ static void __init init_no_remapping_devices(void)
2707 } 2624 }
2708} 2625}
2709 2626
2627#ifdef CONFIG_SUSPEND
2628static int init_iommu_hw(void)
2629{
2630 struct dmar_drhd_unit *drhd;
2631 struct intel_iommu *iommu = NULL;
2632
2633 for_each_active_iommu(iommu, drhd)
2634 if (iommu->qi)
2635 dmar_reenable_qi(iommu);
2636
2637 for_each_active_iommu(iommu, drhd) {
2638 iommu_flush_write_buffer(iommu);
2639
2640 iommu_set_root_entry(iommu);
2641
2642 iommu->flush.flush_context(iommu, 0, 0, 0,
2643 DMA_CCMD_GLOBAL_INVL, 0);
2644 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2645 DMA_TLB_GLOBAL_FLUSH, 0);
2646 iommu_disable_protect_mem_regions(iommu);
2647 iommu_enable_translation(iommu);
2648 }
2649
2650 return 0;
2651}
2652
2653static void iommu_flush_all(void)
2654{
2655 struct dmar_drhd_unit *drhd;
2656 struct intel_iommu *iommu;
2657
2658 for_each_active_iommu(iommu, drhd) {
2659 iommu->flush.flush_context(iommu, 0, 0, 0,
2660 DMA_CCMD_GLOBAL_INVL, 0);
2661 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2662 DMA_TLB_GLOBAL_FLUSH, 0);
2663 }
2664}
2665
2666static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2667{
2668 struct dmar_drhd_unit *drhd;
2669 struct intel_iommu *iommu = NULL;
2670 unsigned long flag;
2671
2672 for_each_active_iommu(iommu, drhd) {
2673 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2674 GFP_ATOMIC);
2675 if (!iommu->iommu_state)
2676 goto nomem;
2677 }
2678
2679 iommu_flush_all();
2680
2681 for_each_active_iommu(iommu, drhd) {
2682 iommu_disable_translation(iommu);
2683
2684 spin_lock_irqsave(&iommu->register_lock, flag);
2685
2686 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2687 readl(iommu->reg + DMAR_FECTL_REG);
2688 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2689 readl(iommu->reg + DMAR_FEDATA_REG);
2690 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2691 readl(iommu->reg + DMAR_FEADDR_REG);
2692 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2693 readl(iommu->reg + DMAR_FEUADDR_REG);
2694
2695 spin_unlock_irqrestore(&iommu->register_lock, flag);
2696 }
2697 return 0;
2698
2699nomem:
2700 for_each_active_iommu(iommu, drhd)
2701 kfree(iommu->iommu_state);
2702
2703 return -ENOMEM;
2704}
2705
2706static int iommu_resume(struct sys_device *dev)
2707{
2708 struct dmar_drhd_unit *drhd;
2709 struct intel_iommu *iommu = NULL;
2710 unsigned long flag;
2711
2712 if (init_iommu_hw()) {
2713 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
2714 return -EIO;
2715 }
2716
2717 for_each_active_iommu(iommu, drhd) {
2718
2719 spin_lock_irqsave(&iommu->register_lock, flag);
2720
2721 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2722 iommu->reg + DMAR_FECTL_REG);
2723 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2724 iommu->reg + DMAR_FEDATA_REG);
2725 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2726 iommu->reg + DMAR_FEADDR_REG);
2727 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2728 iommu->reg + DMAR_FEUADDR_REG);
2729
2730 spin_unlock_irqrestore(&iommu->register_lock, flag);
2731 }
2732
2733 for_each_active_iommu(iommu, drhd)
2734 kfree(iommu->iommu_state);
2735
2736 return 0;
2737}
2738
2739static struct sysdev_class iommu_sysclass = {
2740 .name = "iommu",
2741 .resume = iommu_resume,
2742 .suspend = iommu_suspend,
2743};
2744
2745static struct sys_device device_iommu = {
2746 .cls = &iommu_sysclass,
2747};
2748
2749static int __init init_iommu_sysfs(void)
2750{
2751 int error;
2752
2753 error = sysdev_class_register(&iommu_sysclass);
2754 if (error)
2755 return error;
2756
2757 error = sysdev_register(&device_iommu);
2758 if (error)
2759 sysdev_class_unregister(&iommu_sysclass);
2760
2761 return error;
2762}
2763
2764#else
2765static int __init init_iommu_sysfs(void)
2766{
2767 return 0;
2768}
2769#endif /* CONFIG_PM */
2770
2710int __init intel_iommu_init(void) 2771int __init intel_iommu_init(void)
2711{ 2772{
2712 int ret = 0; 2773 int ret = 0;
@@ -2742,6 +2803,7 @@ int __init intel_iommu_init(void)
2742 init_timer(&unmap_timer); 2803 init_timer(&unmap_timer);
2743 force_iommu = 1; 2804 force_iommu = 1;
2744 dma_ops = &intel_dma_ops; 2805 dma_ops = &intel_dma_ops;
2806 init_iommu_sysfs();
2745 2807
2746 register_iommu(&intel_iommu_ops); 2808 register_iommu(&intel_iommu_ops);
2747 2809
@@ -2758,6 +2820,7 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
2758 if (!info) 2820 if (!info)
2759 return -ENOMEM; 2821 return -ENOMEM;
2760 2822
2823 info->segment = pci_domain_nr(pdev->bus);
2761 info->bus = pdev->bus->number; 2824 info->bus = pdev->bus->number;
2762 info->devfn = pdev->devfn; 2825 info->devfn = pdev->devfn;
2763 info->dev = pdev; 2826 info->dev = pdev;
@@ -2772,6 +2835,33 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
2772 return 0; 2835 return 0;
2773} 2836}
2774 2837
2838static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2839 struct pci_dev *pdev)
2840{
2841 struct pci_dev *tmp, *parent;
2842
2843 if (!iommu || !pdev)
2844 return;
2845
2846 /* dependent device detach */
2847 tmp = pci_find_upstream_pcie_bridge(pdev);
2848 /* Secondary interface's bus number and devfn 0 */
2849 if (tmp) {
2850 parent = pdev->bus->self;
2851 while (parent != tmp) {
2852 iommu_detach_dev(iommu, parent->bus->number,
2853 parent->devfn);
2854 parent = parent->bus->self;
2855 }
2856 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
2857 iommu_detach_dev(iommu,
2858 tmp->subordinate->number, 0);
2859 else /* this is a legacy PCI bridge */
2860 iommu_detach_dev(iommu, tmp->bus->number,
2861 tmp->devfn);
2862 }
2863}
2864
2775static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, 2865static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2776 struct pci_dev *pdev) 2866 struct pci_dev *pdev)
2777{ 2867{
@@ -2781,13 +2871,15 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2781 int found = 0; 2871 int found = 0;
2782 struct list_head *entry, *tmp; 2872 struct list_head *entry, *tmp;
2783 2873
2784 iommu = device_to_iommu(pdev->bus->number, pdev->devfn); 2874 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
2875 pdev->devfn);
2785 if (!iommu) 2876 if (!iommu)
2786 return; 2877 return;
2787 2878
2788 spin_lock_irqsave(&device_domain_lock, flags); 2879 spin_lock_irqsave(&device_domain_lock, flags);
2789 list_for_each_safe(entry, tmp, &domain->devices) { 2880 list_for_each_safe(entry, tmp, &domain->devices) {
2790 info = list_entry(entry, struct device_domain_info, link); 2881 info = list_entry(entry, struct device_domain_info, link);
2882 /* No need to compare PCI domain; it has to be the same */
2791 if (info->bus == pdev->bus->number && 2883 if (info->bus == pdev->bus->number &&
2792 info->devfn == pdev->devfn) { 2884 info->devfn == pdev->devfn) {
2793 list_del(&info->link); 2885 list_del(&info->link);
@@ -2797,6 +2889,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2797 spin_unlock_irqrestore(&device_domain_lock, flags); 2889 spin_unlock_irqrestore(&device_domain_lock, flags);
2798 2890
2799 iommu_detach_dev(iommu, info->bus, info->devfn); 2891 iommu_detach_dev(iommu, info->bus, info->devfn);
2892 iommu_detach_dependent_devices(iommu, pdev);
2800 free_devinfo_mem(info); 2893 free_devinfo_mem(info);
2801 2894
2802 spin_lock_irqsave(&device_domain_lock, flags); 2895 spin_lock_irqsave(&device_domain_lock, flags);
@@ -2811,7 +2904,8 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2811 * owned by this domain, clear this iommu in iommu_bmp 2904 * owned by this domain, clear this iommu in iommu_bmp
2812 * update iommu count and coherency 2905 * update iommu count and coherency
2813 */ 2906 */
2814 if (device_to_iommu(info->bus, info->devfn) == iommu) 2907 if (iommu == device_to_iommu(info->segment, info->bus,
2908 info->devfn))
2815 found = 1; 2909 found = 1;
2816 } 2910 }
2817 2911
@@ -2820,7 +2914,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2820 spin_lock_irqsave(&domain->iommu_lock, tmp_flags); 2914 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2821 clear_bit(iommu->seq_id, &domain->iommu_bmp); 2915 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2822 domain->iommu_count--; 2916 domain->iommu_count--;
2823 domain_update_iommu_coherency(domain); 2917 domain_update_iommu_cap(domain);
2824 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); 2918 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2825 } 2919 }
2826 2920
@@ -2844,17 +2938,18 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2844 2938
2845 spin_unlock_irqrestore(&device_domain_lock, flags1); 2939 spin_unlock_irqrestore(&device_domain_lock, flags1);
2846 2940
2847 iommu = device_to_iommu(info->bus, info->devfn); 2941 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
2848 iommu_detach_dev(iommu, info->bus, info->devfn); 2942 iommu_detach_dev(iommu, info->bus, info->devfn);
2943 iommu_detach_dependent_devices(iommu, info->dev);
2849 2944
2850 /* clear this iommu in iommu_bmp, update iommu count 2945 /* clear this iommu in iommu_bmp, update iommu count
2851 * and coherency 2946 * and capabilities
2852 */ 2947 */
2853 spin_lock_irqsave(&domain->iommu_lock, flags2); 2948 spin_lock_irqsave(&domain->iommu_lock, flags2);
2854 if (test_and_clear_bit(iommu->seq_id, 2949 if (test_and_clear_bit(iommu->seq_id,
2855 &domain->iommu_bmp)) { 2950 &domain->iommu_bmp)) {
2856 domain->iommu_count--; 2951 domain->iommu_count--;
2857 domain_update_iommu_coherency(domain); 2952 domain_update_iommu_cap(domain);
2858 } 2953 }
2859 spin_unlock_irqrestore(&domain->iommu_lock, flags2); 2954 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2860 2955
@@ -3031,7 +3126,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3031 } 3126 }
3032 } 3127 }
3033 3128
3034 iommu = device_to_iommu(pdev->bus->number, pdev->devfn); 3129 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3130 pdev->devfn);
3035 if (!iommu) 3131 if (!iommu)
3036 return -ENODEV; 3132 return -ENODEV;
3037 3133
@@ -3077,6 +3173,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3077 prot |= DMA_PTE_READ; 3173 prot |= DMA_PTE_READ;
3078 if (iommu_prot & IOMMU_WRITE) 3174 if (iommu_prot & IOMMU_WRITE)
3079 prot |= DMA_PTE_WRITE; 3175 prot |= DMA_PTE_WRITE;
3176 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3177 prot |= DMA_PTE_SNP;
3080 3178
3081 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); 3179 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
3082 if (dmar_domain->max_addr < max_addr) { 3180 if (dmar_domain->max_addr < max_addr) {
@@ -3130,6 +3228,17 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3130 return phys; 3228 return phys;
3131} 3229}
3132 3230
3231static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3232 unsigned long cap)
3233{
3234 struct dmar_domain *dmar_domain = domain->priv;
3235
3236 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3237 return dmar_domain->iommu_snooping;
3238
3239 return 0;
3240}
3241
3133static struct iommu_ops intel_iommu_ops = { 3242static struct iommu_ops intel_iommu_ops = {
3134 .domain_init = intel_iommu_domain_init, 3243 .domain_init = intel_iommu_domain_init,
3135 .domain_destroy = intel_iommu_domain_destroy, 3244 .domain_destroy = intel_iommu_domain_destroy,
@@ -3138,6 +3247,7 @@ static struct iommu_ops intel_iommu_ops = {
3138 .map = intel_iommu_map_range, 3247 .map = intel_iommu_map_range,
3139 .unmap = intel_iommu_unmap_range, 3248 .unmap = intel_iommu_unmap_range,
3140 .iova_to_phys = intel_iommu_iova_to_phys, 3249 .iova_to_phys = intel_iommu_iova_to_phys,
3250 .domain_has_cap = intel_iommu_domain_has_cap,
3141}; 3251};
3142 3252
3143static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) 3253static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 45effc5726c0..3a0cb0bb0593 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -6,13 +6,23 @@
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <asm/io_apic.h> 7#include <asm/io_apic.h>
8#include <asm/smp.h> 8#include <asm/smp.h>
9#include <asm/cpu.h>
9#include <linux/intel-iommu.h> 10#include <linux/intel-iommu.h>
10#include "intr_remapping.h" 11#include "intr_remapping.h"
12#include <acpi/acpi.h>
11 13
12static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 14static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
13static int ir_ioapic_num; 15static int ir_ioapic_num;
14int intr_remapping_enabled; 16int intr_remapping_enabled;
15 17
18static int disable_intremap;
19static __init int setup_nointremap(char *str)
20{
21 disable_intremap = 1;
22 return 0;
23}
24early_param("nointremap", setup_nointremap);
25
16struct irq_2_iommu { 26struct irq_2_iommu {
17 struct intel_iommu *iommu; 27 struct intel_iommu *iommu;
18 u16 irte_index; 28 u16 irte_index;
@@ -20,16 +30,13 @@ struct irq_2_iommu {
20 u8 irte_mask; 30 u8 irte_mask;
21}; 31};
22 32
23#ifdef CONFIG_SPARSE_IRQ 33#ifdef CONFIG_GENERIC_HARDIRQS
24static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) 34static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
25{ 35{
26 struct irq_2_iommu *iommu; 36 struct irq_2_iommu *iommu;
27 int node;
28
29 node = cpu_to_node(cpu);
30 37
31 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); 38 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
32 printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); 39 printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
33 40
34 return iommu; 41 return iommu;
35} 42}
@@ -46,7 +53,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
46 return desc->irq_2_iommu; 53 return desc->irq_2_iommu;
47} 54}
48 55
49static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) 56static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
50{ 57{
51 struct irq_desc *desc; 58 struct irq_desc *desc;
52 struct irq_2_iommu *irq_iommu; 59 struct irq_2_iommu *irq_iommu;
@@ -54,7 +61,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
54 /* 61 /*
55 * alloc irq desc if not allocated already. 62 * alloc irq desc if not allocated already.
56 */ 63 */
57 desc = irq_to_desc_alloc_cpu(irq, cpu); 64 desc = irq_to_desc_alloc_node(irq, node);
58 if (!desc) { 65 if (!desc) {
59 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 66 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
60 return NULL; 67 return NULL;
@@ -63,14 +70,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
63 irq_iommu = desc->irq_2_iommu; 70 irq_iommu = desc->irq_2_iommu;
64 71
65 if (!irq_iommu) 72 if (!irq_iommu)
66 desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); 73 desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
67 74
68 return desc->irq_2_iommu; 75 return desc->irq_2_iommu;
69} 76}
70 77
71static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 78static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
72{ 79{
73 return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); 80 return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
74} 81}
75 82
76#else /* !CONFIG_SPARSE_IRQ */ 83#else /* !CONFIG_SPARSE_IRQ */
@@ -116,21 +123,22 @@ int get_irte(int irq, struct irte *entry)
116{ 123{
117 int index; 124 int index;
118 struct irq_2_iommu *irq_iommu; 125 struct irq_2_iommu *irq_iommu;
126 unsigned long flags;
119 127
120 if (!entry) 128 if (!entry)
121 return -1; 129 return -1;
122 130
123 spin_lock(&irq_2_ir_lock); 131 spin_lock_irqsave(&irq_2_ir_lock, flags);
124 irq_iommu = valid_irq_2_iommu(irq); 132 irq_iommu = valid_irq_2_iommu(irq);
125 if (!irq_iommu) { 133 if (!irq_iommu) {
126 spin_unlock(&irq_2_ir_lock); 134 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
127 return -1; 135 return -1;
128 } 136 }
129 137
130 index = irq_iommu->irte_index + irq_iommu->sub_handle; 138 index = irq_iommu->irte_index + irq_iommu->sub_handle;
131 *entry = *(irq_iommu->iommu->ir_table->base + index); 139 *entry = *(irq_iommu->iommu->ir_table->base + index);
132 140
133 spin_unlock(&irq_2_ir_lock); 141 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
134 return 0; 142 return 0;
135} 143}
136 144
@@ -140,6 +148,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
140 struct irq_2_iommu *irq_iommu; 148 struct irq_2_iommu *irq_iommu;
141 u16 index, start_index; 149 u16 index, start_index;
142 unsigned int mask = 0; 150 unsigned int mask = 0;
151 unsigned long flags;
143 int i; 152 int i;
144 153
145 if (!count) 154 if (!count)
@@ -169,7 +178,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
169 return -1; 178 return -1;
170 } 179 }
171 180
172 spin_lock(&irq_2_ir_lock); 181 spin_lock_irqsave(&irq_2_ir_lock, flags);
173 do { 182 do {
174 for (i = index; i < index + count; i++) 183 for (i = index; i < index + count; i++)
175 if (table->base[i].present) 184 if (table->base[i].present)
@@ -181,7 +190,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
181 index = (index + count) % INTR_REMAP_TABLE_ENTRIES; 190 index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
182 191
183 if (index == start_index) { 192 if (index == start_index) {
184 spin_unlock(&irq_2_ir_lock); 193 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
185 printk(KERN_ERR "can't allocate an IRTE\n"); 194 printk(KERN_ERR "can't allocate an IRTE\n");
186 return -1; 195 return -1;
187 } 196 }
@@ -192,7 +201,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
192 201
193 irq_iommu = irq_2_iommu_alloc(irq); 202 irq_iommu = irq_2_iommu_alloc(irq);
194 if (!irq_iommu) { 203 if (!irq_iommu) {
195 spin_unlock(&irq_2_ir_lock); 204 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
196 printk(KERN_ERR "can't allocate irq_2_iommu\n"); 205 printk(KERN_ERR "can't allocate irq_2_iommu\n");
197 return -1; 206 return -1;
198 } 207 }
@@ -202,7 +211,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
202 irq_iommu->sub_handle = 0; 211 irq_iommu->sub_handle = 0;
203 irq_iommu->irte_mask = mask; 212 irq_iommu->irte_mask = mask;
204 213
205 spin_unlock(&irq_2_ir_lock); 214 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
206 215
207 return index; 216 return index;
208} 217}
@@ -222,30 +231,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
222{ 231{
223 int index; 232 int index;
224 struct irq_2_iommu *irq_iommu; 233 struct irq_2_iommu *irq_iommu;
234 unsigned long flags;
225 235
226 spin_lock(&irq_2_ir_lock); 236 spin_lock_irqsave(&irq_2_ir_lock, flags);
227 irq_iommu = valid_irq_2_iommu(irq); 237 irq_iommu = valid_irq_2_iommu(irq);
228 if (!irq_iommu) { 238 if (!irq_iommu) {
229 spin_unlock(&irq_2_ir_lock); 239 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
230 return -1; 240 return -1;
231 } 241 }
232 242
233 *sub_handle = irq_iommu->sub_handle; 243 *sub_handle = irq_iommu->sub_handle;
234 index = irq_iommu->irte_index; 244 index = irq_iommu->irte_index;
235 spin_unlock(&irq_2_ir_lock); 245 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
236 return index; 246 return index;
237} 247}
238 248
239int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 249int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
240{ 250{
241 struct irq_2_iommu *irq_iommu; 251 struct irq_2_iommu *irq_iommu;
252 unsigned long flags;
242 253
243 spin_lock(&irq_2_ir_lock); 254 spin_lock_irqsave(&irq_2_ir_lock, flags);
244 255
245 irq_iommu = irq_2_iommu_alloc(irq); 256 irq_iommu = irq_2_iommu_alloc(irq);
246 257
247 if (!irq_iommu) { 258 if (!irq_iommu) {
248 spin_unlock(&irq_2_ir_lock); 259 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
249 printk(KERN_ERR "can't allocate irq_2_iommu\n"); 260 printk(KERN_ERR "can't allocate irq_2_iommu\n");
250 return -1; 261 return -1;
251 } 262 }
@@ -255,7 +266,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
255 irq_iommu->sub_handle = subhandle; 266 irq_iommu->sub_handle = subhandle;
256 irq_iommu->irte_mask = 0; 267 irq_iommu->irte_mask = 0;
257 268
258 spin_unlock(&irq_2_ir_lock); 269 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
259 270
260 return 0; 271 return 0;
261} 272}
@@ -263,11 +274,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
263int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) 274int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
264{ 275{
265 struct irq_2_iommu *irq_iommu; 276 struct irq_2_iommu *irq_iommu;
277 unsigned long flags;
266 278
267 spin_lock(&irq_2_ir_lock); 279 spin_lock_irqsave(&irq_2_ir_lock, flags);
268 irq_iommu = valid_irq_2_iommu(irq); 280 irq_iommu = valid_irq_2_iommu(irq);
269 if (!irq_iommu) { 281 if (!irq_iommu) {
270 spin_unlock(&irq_2_ir_lock); 282 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
271 return -1; 283 return -1;
272 } 284 }
273 285
@@ -276,7 +288,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
276 irq_iommu->sub_handle = 0; 288 irq_iommu->sub_handle = 0;
277 irq_2_iommu(irq)->irte_mask = 0; 289 irq_2_iommu(irq)->irte_mask = 0;
278 290
279 spin_unlock(&irq_2_ir_lock); 291 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
280 292
281 return 0; 293 return 0;
282} 294}
@@ -288,11 +300,12 @@ int modify_irte(int irq, struct irte *irte_modified)
288 struct irte *irte; 300 struct irte *irte;
289 struct intel_iommu *iommu; 301 struct intel_iommu *iommu;
290 struct irq_2_iommu *irq_iommu; 302 struct irq_2_iommu *irq_iommu;
303 unsigned long flags;
291 304
292 spin_lock(&irq_2_ir_lock); 305 spin_lock_irqsave(&irq_2_ir_lock, flags);
293 irq_iommu = valid_irq_2_iommu(irq); 306 irq_iommu = valid_irq_2_iommu(irq);
294 if (!irq_iommu) { 307 if (!irq_iommu) {
295 spin_unlock(&irq_2_ir_lock); 308 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
296 return -1; 309 return -1;
297 } 310 }
298 311
@@ -301,11 +314,11 @@ int modify_irte(int irq, struct irte *irte_modified)
301 index = irq_iommu->irte_index + irq_iommu->sub_handle; 314 index = irq_iommu->irte_index + irq_iommu->sub_handle;
302 irte = &iommu->ir_table->base[index]; 315 irte = &iommu->ir_table->base[index];
303 316
304 set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); 317 set_64bit((unsigned long *)irte, irte_modified->low);
305 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 318 __iommu_flush_cache(iommu, irte, sizeof(*irte));
306 319
307 rc = qi_flush_iec(iommu, index, 0); 320 rc = qi_flush_iec(iommu, index, 0);
308 spin_unlock(&irq_2_ir_lock); 321 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
309 322
310 return rc; 323 return rc;
311} 324}
@@ -316,11 +329,12 @@ int flush_irte(int irq)
316 int index; 329 int index;
317 struct intel_iommu *iommu; 330 struct intel_iommu *iommu;
318 struct irq_2_iommu *irq_iommu; 331 struct irq_2_iommu *irq_iommu;
332 unsigned long flags;
319 333
320 spin_lock(&irq_2_ir_lock); 334 spin_lock_irqsave(&irq_2_ir_lock, flags);
321 irq_iommu = valid_irq_2_iommu(irq); 335 irq_iommu = valid_irq_2_iommu(irq);
322 if (!irq_iommu) { 336 if (!irq_iommu) {
323 spin_unlock(&irq_2_ir_lock); 337 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
324 return -1; 338 return -1;
325 } 339 }
326 340
@@ -329,7 +343,7 @@ int flush_irte(int irq)
329 index = irq_iommu->irte_index + irq_iommu->sub_handle; 343 index = irq_iommu->irte_index + irq_iommu->sub_handle;
330 344
331 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); 345 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
332 spin_unlock(&irq_2_ir_lock); 346 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
333 347
334 return rc; 348 return rc;
335} 349}
@@ -362,11 +376,12 @@ int free_irte(int irq)
362 struct irte *irte; 376 struct irte *irte;
363 struct intel_iommu *iommu; 377 struct intel_iommu *iommu;
364 struct irq_2_iommu *irq_iommu; 378 struct irq_2_iommu *irq_iommu;
379 unsigned long flags;
365 380
366 spin_lock(&irq_2_ir_lock); 381 spin_lock_irqsave(&irq_2_ir_lock, flags);
367 irq_iommu = valid_irq_2_iommu(irq); 382 irq_iommu = valid_irq_2_iommu(irq);
368 if (!irq_iommu) { 383 if (!irq_iommu) {
369 spin_unlock(&irq_2_ir_lock); 384 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
370 return -1; 385 return -1;
371 } 386 }
372 387
@@ -377,7 +392,7 @@ int free_irte(int irq)
377 392
378 if (!irq_iommu->sub_handle) { 393 if (!irq_iommu->sub_handle) {
379 for (i = 0; i < (1 << irq_iommu->irte_mask); i++) 394 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
380 set_64bit((unsigned long *)irte, 0); 395 set_64bit((unsigned long *)(irte + i), 0);
381 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); 396 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
382 } 397 }
383 398
@@ -386,7 +401,7 @@ int free_irte(int irq)
386 irq_iommu->sub_handle = 0; 401 irq_iommu->sub_handle = 0;
387 irq_iommu->irte_mask = 0; 402 irq_iommu->irte_mask = 0;
388 403
389 spin_unlock(&irq_2_ir_lock); 404 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
390 405
391 return rc; 406 return rc;
392} 407}
@@ -406,6 +421,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
406 421
407 /* Set interrupt-remapping table pointer */ 422 /* Set interrupt-remapping table pointer */
408 cmd = iommu->gcmd | DMA_GCMD_SIRTP; 423 cmd = iommu->gcmd | DMA_GCMD_SIRTP;
424 iommu->gcmd |= DMA_GCMD_SIRTP;
409 writel(cmd, iommu->reg + DMAR_GCMD_REG); 425 writel(cmd, iommu->reg + DMAR_GCMD_REG);
410 426
411 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 427 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
@@ -438,12 +454,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
438 struct page *pages; 454 struct page *pages;
439 455
440 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), 456 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
441 GFP_KERNEL); 457 GFP_ATOMIC);
442 458
443 if (!iommu->ir_table) 459 if (!iommu->ir_table)
444 return -ENOMEM; 460 return -ENOMEM;
445 461
446 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); 462 pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
447 463
448 if (!pages) { 464 if (!pages) {
449 printk(KERN_ERR "failed to allocate pages of order %d\n", 465 printk(KERN_ERR "failed to allocate pages of order %d\n",
@@ -458,11 +474,85 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
458 return 0; 474 return 0;
459} 475}
460 476
477/*
478 * Disable Interrupt Remapping.
479 */
480static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
481{
482 unsigned long flags;
483 u32 sts;
484
485 if (!ecap_ir_support(iommu->ecap))
486 return;
487
488 /*
489 * global invalidation of interrupt entry cache before disabling
490 * interrupt-remapping.
491 */
492 qi_global_iec(iommu);
493
494 spin_lock_irqsave(&iommu->register_lock, flags);
495
496 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
497 if (!(sts & DMA_GSTS_IRES))
498 goto end;
499
500 iommu->gcmd &= ~DMA_GCMD_IRE;
501 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
502
503 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
504 readl, !(sts & DMA_GSTS_IRES), sts);
505
506end:
507 spin_unlock_irqrestore(&iommu->register_lock, flags);
508}
509
510int __init intr_remapping_supported(void)
511{
512 struct dmar_drhd_unit *drhd;
513
514 if (disable_intremap)
515 return 0;
516
517 for_each_drhd_unit(drhd) {
518 struct intel_iommu *iommu = drhd->iommu;
519
520 if (!ecap_ir_support(iommu->ecap))
521 return 0;
522 }
523
524 return 1;
525}
526
461int __init enable_intr_remapping(int eim) 527int __init enable_intr_remapping(int eim)
462{ 528{
463 struct dmar_drhd_unit *drhd; 529 struct dmar_drhd_unit *drhd;
464 int setup = 0; 530 int setup = 0;
465 531
532 for_each_drhd_unit(drhd) {
533 struct intel_iommu *iommu = drhd->iommu;
534
535 /*
536 * If the queued invalidation is already initialized,
537 * shouldn't disable it.
538 */
539 if (iommu->qi)
540 continue;
541
542 /*
543 * Clear previous faults.
544 */
545 dmar_fault(-1, iommu);
546
547 /*
548 * Disable intr remapping and queued invalidation, if already
549 * enabled prior to OS handover.
550 */
551 iommu_disable_intr_remapping(iommu);
552
553 dmar_disable_qi(iommu);
554 }
555
466 /* 556 /*
467 * check for the Interrupt-remapping support 557 * check for the Interrupt-remapping support
468 */ 558 */
@@ -586,3 +676,54 @@ int __init parse_ioapics_under_ir(void)
586 676
587 return ir_supported; 677 return ir_supported;
588} 678}
679
680void disable_intr_remapping(void)
681{
682 struct dmar_drhd_unit *drhd;
683 struct intel_iommu *iommu = NULL;
684
685 /*
686 * Disable Interrupt-remapping for all the DRHD's now.
687 */
688 for_each_iommu(iommu, drhd) {
689 if (!ecap_ir_support(iommu->ecap))
690 continue;
691
692 iommu_disable_intr_remapping(iommu);
693 }
694}
695
696int reenable_intr_remapping(int eim)
697{
698 struct dmar_drhd_unit *drhd;
699 int setup = 0;
700 struct intel_iommu *iommu = NULL;
701
702 for_each_iommu(iommu, drhd)
703 if (iommu->qi)
704 dmar_reenable_qi(iommu);
705
706 /*
707 * Setup Interrupt-remapping for all the DRHD's now.
708 */
709 for_each_iommu(iommu, drhd) {
710 if (!ecap_ir_support(iommu->ecap))
711 continue;
712
713 /* Set up interrupt remapping for iommu.*/
714 iommu_set_intr_remapping(iommu, eim);
715 setup = 1;
716 }
717
718 if (!setup)
719 goto error;
720
721 return 0;
722
723error:
724 /*
725 * handle error condition gracefully here!
726 */
727 return -1;
728}
729
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
new file mode 100644
index 000000000000..03c7706c0a09
--- /dev/null
+++ b/drivers/pci/iov.c
@@ -0,0 +1,683 @@
1/*
2 * drivers/pci/iov.c
3 *
4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
5 *
6 * PCI Express I/O Virtualization (IOV) support.
7 * Single Root IOV 1.0
8 */
9
10#include <linux/pci.h>
11#include <linux/mutex.h>
12#include <linux/string.h>
13#include <linux/delay.h>
14#include "pci.h"
15
16#define VIRTFN_ID_LEN 16
17
18static inline u8 virtfn_bus(struct pci_dev *dev, int id)
19{
20 return dev->bus->number + ((dev->devfn + dev->sriov->offset +
21 dev->sriov->stride * id) >> 8);
22}
23
24static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
25{
26 return (dev->devfn + dev->sriov->offset +
27 dev->sriov->stride * id) & 0xff;
28}
29
30static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
31{
32 int rc;
33 struct pci_bus *child;
34
35 if (bus->number == busnr)
36 return bus;
37
38 child = pci_find_bus(pci_domain_nr(bus), busnr);
39 if (child)
40 return child;
41
42 child = pci_add_new_bus(bus, NULL, busnr);
43 if (!child)
44 return NULL;
45
46 child->subordinate = busnr;
47 child->dev.parent = bus->bridge;
48 rc = pci_bus_add_child(child);
49 if (rc) {
50 pci_remove_bus(child);
51 return NULL;
52 }
53
54 return child;
55}
56
57static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
58{
59 struct pci_bus *child;
60
61 if (bus->number == busnr)
62 return;
63
64 child = pci_find_bus(pci_domain_nr(bus), busnr);
65 BUG_ON(!child);
66
67 if (list_empty(&child->devices))
68 pci_remove_bus(child);
69}
70
71static int virtfn_add(struct pci_dev *dev, int id, int reset)
72{
73 int i;
74 int rc;
75 u64 size;
76 char buf[VIRTFN_ID_LEN];
77 struct pci_dev *virtfn;
78 struct resource *res;
79 struct pci_sriov *iov = dev->sriov;
80
81 virtfn = alloc_pci_dev();
82 if (!virtfn)
83 return -ENOMEM;
84
85 mutex_lock(&iov->dev->sriov->lock);
86 virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
87 if (!virtfn->bus) {
88 kfree(virtfn);
89 mutex_unlock(&iov->dev->sriov->lock);
90 return -ENOMEM;
91 }
92 virtfn->devfn = virtfn_devfn(dev, id);
93 virtfn->vendor = dev->vendor;
94 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
95 pci_setup_device(virtfn);
96 virtfn->dev.parent = dev->dev.parent;
97
98 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
99 res = dev->resource + PCI_IOV_RESOURCES + i;
100 if (!res->parent)
101 continue;
102 virtfn->resource[i].name = pci_name(virtfn);
103 virtfn->resource[i].flags = res->flags;
104 size = resource_size(res);
105 do_div(size, iov->total);
106 virtfn->resource[i].start = res->start + size * id;
107 virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
108 rc = request_resource(res, &virtfn->resource[i]);
109 BUG_ON(rc);
110 }
111
112 if (reset)
113 __pci_reset_function(virtfn);
114
115 pci_device_add(virtfn, virtfn->bus);
116 mutex_unlock(&iov->dev->sriov->lock);
117
118 virtfn->physfn = pci_dev_get(dev);
119 virtfn->is_virtfn = 1;
120
121 rc = pci_bus_add_device(virtfn);
122 if (rc)
123 goto failed1;
124 sprintf(buf, "virtfn%u", id);
125 rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
126 if (rc)
127 goto failed1;
128 rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
129 if (rc)
130 goto failed2;
131
132 kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
133
134 return 0;
135
136failed2:
137 sysfs_remove_link(&dev->dev.kobj, buf);
138failed1:
139 pci_dev_put(dev);
140 mutex_lock(&iov->dev->sriov->lock);
141 pci_remove_bus_device(virtfn);
142 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
143 mutex_unlock(&iov->dev->sriov->lock);
144
145 return rc;
146}
147
148static void virtfn_remove(struct pci_dev *dev, int id, int reset)
149{
150 char buf[VIRTFN_ID_LEN];
151 struct pci_bus *bus;
152 struct pci_dev *virtfn;
153 struct pci_sriov *iov = dev->sriov;
154
155 bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
156 if (!bus)
157 return;
158
159 virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
160 if (!virtfn)
161 return;
162
163 pci_dev_put(virtfn);
164
165 if (reset) {
166 device_release_driver(&virtfn->dev);
167 __pci_reset_function(virtfn);
168 }
169
170 sprintf(buf, "virtfn%u", id);
171 sysfs_remove_link(&dev->dev.kobj, buf);
172 sysfs_remove_link(&virtfn->dev.kobj, "physfn");
173
174 mutex_lock(&iov->dev->sriov->lock);
175 pci_remove_bus_device(virtfn);
176 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
177 mutex_unlock(&iov->dev->sriov->lock);
178
179 pci_dev_put(dev);
180}
181
182static int sriov_migration(struct pci_dev *dev)
183{
184 u16 status;
185 struct pci_sriov *iov = dev->sriov;
186
187 if (!iov->nr_virtfn)
188 return 0;
189
190 if (!(iov->cap & PCI_SRIOV_CAP_VFM))
191 return 0;
192
193 pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
194 if (!(status & PCI_SRIOV_STATUS_VFM))
195 return 0;
196
197 schedule_work(&iov->mtask);
198
199 return 1;
200}
201
202static void sriov_migration_task(struct work_struct *work)
203{
204 int i;
205 u8 state;
206 u16 status;
207 struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
208
209 for (i = iov->initial; i < iov->nr_virtfn; i++) {
210 state = readb(iov->mstate + i);
211 if (state == PCI_SRIOV_VFM_MI) {
212 writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
213 state = readb(iov->mstate + i);
214 if (state == PCI_SRIOV_VFM_AV)
215 virtfn_add(iov->self, i, 1);
216 } else if (state == PCI_SRIOV_VFM_MO) {
217 virtfn_remove(iov->self, i, 1);
218 writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
219 state = readb(iov->mstate + i);
220 if (state == PCI_SRIOV_VFM_AV)
221 virtfn_add(iov->self, i, 0);
222 }
223 }
224
225 pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
226 status &= ~PCI_SRIOV_STATUS_VFM;
227 pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
228}
229
/*
 * Set up VF Migration for @dev: map the VF Migration State Array (one
 * byte of state per VF) and enable the VFM + migration interrupt bits
 * in the SR-IOV control register.
 *
 * Returns 0 on success or a negative errno.
 */
static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	/* Migration only matters for VFs beyond InitialVFs. */
	if (nr_virtfn <= iov->initial)
		return 0;

	/* The VFM register encodes the BAR index and offset of the array. */
	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	/* The whole array (nr_virtfn bytes) must fit inside that BAR. */
	table = PCI_SRIOV_VFM_OFFSET(table);
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	/* Enable VF Migration and its interrupt. */
	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}
261
/*
 * Undo sriov_enable_migration(): clear the VFM bits, flush any
 * in-flight migration work, then unmap the state array.
 */
static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	/* Turn off VF Migration and its interrupt first... */
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	/* ...so no new work is scheduled while we wait and unmap. */
	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}
272
/*
 * Enable @nr_virtfn Virtual Functions on Physical Function @dev.
 *
 * Validates the request against InitialVFs/TotalVFs, programs NumVFs,
 * checks VF routing (offset/stride, bus range, claimed BAR resources),
 * sets VF Enable + VF MSE, creates a pci_dev for each available VF and,
 * when the device supports it, enables VF Migration.
 *
 * Returns 0 on success or a negative errno.
 */
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;

	if (!nr_virtfn)
		return 0;

	/* Already enabled; caller must disable first. */
	if (iov->nr_virtfn)
		return -EINVAL;

	/*
	 * Without VF Migration, InitialVFs must equal TotalVFs and the
	 * request may not exceed InitialVFs.
	 */
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	/* First VF Offset / VF Stride are only valid after NumVFs is set. */
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	/* Every SR-IOV BAR found at init must be claimed in the tree. */
	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	/* The last VF's bus number must be reachable below this bridge. */
	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	/*
	 * Function Dependency Link: if another function provides our VF
	 * routing, it must already be a physfn; expose the dependency as
	 * a "dep_link" sysfs symlink.
	 */
	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		/*
		 * NOTE(review): the reference is dropped before pdev is
		 * dereferenced below; this relies on the linked function
		 * remaining on the bus — confirm this is safe.
		 */
		pci_dev_put(pdev);

		if (!pdev->is_physfn)
			return -ENODEV;

		rc = sysfs_create_link(&dev->dev.kobj,
					&pdev->dev.kobj, "dep_link");
		if (rc)
			return rc;
	}

	/*
	 * Set VF Enable + VF MSE, then wait 100ms for the VFs to become
	 * usable; block user config access during the transition.
	 */
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_unblock_user_cfg_access(dev);

	iov->initial = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	/* Create pci_dev entries for the initially available VFs. */
	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->nr_virtfn = nr_virtfn;

	return 0;

failed:
	/* Tear down any VFs created before the failure. */
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_unblock_user_cfg_access(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}
381
/*
 * Disable SR-IOV on @dev: tear down VF Migration (if active) and every
 * VF pci_dev, then clear VF Enable / VF MSE in the control register.
 */
static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->nr_virtfn)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->nr_virtfn; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Give the device time to quiesce after VF Enable is cleared. */
	ssleep(1);
	pci_unblock_user_cfg_access(dev);

	/* Remove the dependency symlink created by sriov_enable(). */
	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->nr_virtfn = 0;
}
407
/*
 * Probe the SR-IOV capability at config offset @pos and attach the
 * pci_sriov state to @dev.
 *
 * Disables a firmware-enabled VF Enable, validates TotalVFs, VF
 * offset/stride and page size, sizes each SR-IOV BAR to cover TotalVFs
 * instances, and marks @dev as a Physical Function.
 *
 * Returns 0 on success (also when TotalVFs is zero), or a negative
 * errno on failure.
 */
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	/* SR-IOV is only defined for (root complex) endpoints. */
	if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
	    dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	/* If firmware left VF Enable on, turn it off and let VFs settle. */
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	/*
	 * ARI Capable Hierarchy must be consistent across PFs; if another
	 * PF already exists on this bus, reuse its setting (ctrl stays 0),
	 * otherwise decide from the bus's ARI state.
	 */
	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	/* Offset/stride are only defined once NumVFs has been written. */
	pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	/* Mask off supported page sizes smaller than the system page... */
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	/* ...and pick the smallest remaining one (lowest set bit). */
	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	/*
	 * Size each SR-IOV BAR: a single-VF BAR must be page aligned, and
	 * the PF resource is stretched to hold one instance per VF.
	 * __pci_read_base() advances i past the upper half of 64-bit BARs.
	 */
	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		i += __pci_read_base(dev, pci_bar_unknown, res,
					pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	/* For RC integrated endpoints the link is relative to the slot. */
	if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	/*
	 * The first PF found owns the shared state (and initializes its
	 * mutex); subsequent PFs just take a reference on the owner.
	 */
	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else {
		iov->dev = dev;
		mutex_init(&iov->lock);
	}

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	/* Invalidate any SR-IOV resources we partially probed. */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}
513
/*
 * Free the pci_sriov state attached to @dev.  Must not be called while
 * any VFs are still enabled.
 */
static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->nr_virtfn);

	/* Only the owning PF initialized the mutex; others hold a ref. */
	if (dev == dev->sriov->dev)
		mutex_destroy(&dev->sriov->lock);
	else
		pci_dev_put(dev->sriov->dev);

	kfree(dev->sriov);
	dev->sriov = NULL;
}
526
/*
 * Re-program the SR-IOV registers after a PF reset or resume.  If VF
 * Enable is still set, the hardware state survived and nothing needs
 * to be restored.
 */
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	/* Rewrite the SR-IOV BARs before re-enabling the VFs. */
	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* VFs need time to become available after VF Enable is set. */
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}
546
547/**
548 * pci_iov_init - initialize the IOV capability
549 * @dev: the PCI device
550 *
551 * Returns 0 on success, or negative on failure.
552 */
553int pci_iov_init(struct pci_dev *dev)
554{
555 int pos;
556
557 if (!dev->is_pcie)
558 return -ENODEV;
559
560 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
561 if (pos)
562 return sriov_init(dev, pos);
563
564 return -ENODEV;
565}
566
567/**
568 * pci_iov_release - release resources used by the IOV capability
569 * @dev: the PCI device
570 */
571void pci_iov_release(struct pci_dev *dev)
572{
573 if (dev->is_physfn)
574 sriov_release(dev);
575}
576
577/**
578 * pci_iov_resource_bar - get position of the SR-IOV BAR
579 * @dev: the PCI device
580 * @resno: the resource number
581 * @type: the BAR type to be filled in
582 *
583 * Returns position of the BAR encapsulated in the SR-IOV capability.
584 */
585int pci_iov_resource_bar(struct pci_dev *dev, int resno,
586 enum pci_bar_type *type)
587{
588 if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
589 return 0;
590
591 BUG_ON(!dev->is_physfn);
592
593 *type = pci_bar_unknown;
594
595 return dev->sriov->pos + PCI_SRIOV_BAR +
596 4 * (resno - PCI_IOV_RESOURCES);
597}
598
599/**
600 * pci_restore_iov_state - restore the state of the IOV capability
601 * @dev: the PCI device
602 */
603void pci_restore_iov_state(struct pci_dev *dev)
604{
605 if (dev->is_physfn)
606 sriov_restore_state(dev);
607}
608
609/**
610 * pci_iov_bus_range - find bus range used by Virtual Function
611 * @bus: the PCI bus
612 *
613 * Returns max number of buses (exclude current one) used by Virtual
614 * Functions.
615 */
616int pci_iov_bus_range(struct pci_bus *bus)
617{
618 int max = 0;
619 u8 busnr;
620 struct pci_dev *dev;
621
622 list_for_each_entry(dev, &bus->devices, bus_list) {
623 if (!dev->is_physfn)
624 continue;
625 busnr = virtfn_bus(dev, dev->sriov->total - 1);
626 if (busnr > max)
627 max = busnr;
628 }
629
630 return max ? max - bus->number : 0;
631}
632
633/**
634 * pci_enable_sriov - enable the SR-IOV capability
635 * @dev: the PCI device
636 * @nr_virtfn: number of virtual functions to enable
637 *
638 * Returns 0 on success, or negative on failure.
639 */
640int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
641{
642 might_sleep();
643
644 if (!dev->is_physfn)
645 return -ENODEV;
646
647 return sriov_enable(dev, nr_virtfn);
648}
649EXPORT_SYMBOL_GPL(pci_enable_sriov);
650
651/**
652 * pci_disable_sriov - disable the SR-IOV capability
653 * @dev: the PCI device
654 */
655void pci_disable_sriov(struct pci_dev *dev)
656{
657 might_sleep();
658
659 if (!dev->is_physfn)
660 return;
661
662 sriov_disable(dev);
663}
664EXPORT_SYMBOL_GPL(pci_disable_sriov);
665
666/**
667 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
668 * @dev: the PCI device
669 *
670 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
671 *
672 * Physical Function driver is responsible to register IRQ handler using
673 * VF Migration Interrupt Message Number, and call this function when the
674 * interrupt is generated by the hardware.
675 */
676irqreturn_t pci_sriov_migration(struct pci_dev *dev)
677{
678 if (!dev->is_physfn)
679 return IRQ_NONE;
680
681 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
682}
683EXPORT_SYMBOL_GPL(pci_sriov_migration);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index baba2eb5367d..d9f06fbfa0bf 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -27,65 +27,65 @@ static int pci_msi_enable = 1;
27 27
28/* Arch hooks */ 28/* Arch hooks */
29 29
30int __attribute__ ((weak)) 30#ifndef arch_msi_check_device
31arch_msi_check_device(struct pci_dev *dev, int nvec, int type) 31int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
32{ 32{
33 return 0; 33 return 0;
34} 34}
35#endif
35 36
36int __attribute__ ((weak)) 37#ifndef arch_setup_msi_irqs
37arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) 38int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
38{
39 return 0;
40}
41
42int __attribute__ ((weak))
43arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
44{ 39{
45 struct msi_desc *entry; 40 struct msi_desc *entry;
46 int ret; 41 int ret;
47 42
43 /*
44 * If an architecture wants to support multiple MSI, it needs to
45 * override arch_setup_msi_irqs()
46 */
47 if (type == PCI_CAP_ID_MSI && nvec > 1)
48 return 1;
49
48 list_for_each_entry(entry, &dev->msi_list, list) { 50 list_for_each_entry(entry, &dev->msi_list, list) {
49 ret = arch_setup_msi_irq(dev, entry); 51 ret = arch_setup_msi_irq(dev, entry);
50 if (ret) 52 if (ret < 0)
51 return ret; 53 return ret;
54 if (ret > 0)
55 return -ENOSPC;
52 } 56 }
53 57
54 return 0; 58 return 0;
55} 59}
60#endif
56 61
57void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) 62#ifndef arch_teardown_msi_irqs
58{ 63void arch_teardown_msi_irqs(struct pci_dev *dev)
59 return;
60}
61
62void __attribute__ ((weak))
63arch_teardown_msi_irqs(struct pci_dev *dev)
64{ 64{
65 struct msi_desc *entry; 65 struct msi_desc *entry;
66 66
67 list_for_each_entry(entry, &dev->msi_list, list) { 67 list_for_each_entry(entry, &dev->msi_list, list) {
68 if (entry->irq != 0) 68 int i, nvec;
69 arch_teardown_msi_irq(entry->irq); 69 if (entry->irq == 0)
70 continue;
71 nvec = 1 << entry->msi_attrib.multiple;
72 for (i = 0; i < nvec; i++)
73 arch_teardown_msi_irq(entry->irq + i);
70 } 74 }
71} 75}
76#endif
72 77
73static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) 78static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
74{ 79{
75 u16 control; 80 u16 control;
76 81
77 if (pos) { 82 BUG_ON(!pos);
78 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
79 control &= ~PCI_MSI_FLAGS_ENABLE;
80 if (enable)
81 control |= PCI_MSI_FLAGS_ENABLE;
82 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
83 }
84}
85 83
86static void msi_set_enable(struct pci_dev *dev, int enable) 84 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
87{ 85 control &= ~PCI_MSI_FLAGS_ENABLE;
88 __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); 86 if (enable)
87 control |= PCI_MSI_FLAGS_ENABLE;
88 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
89} 89}
90 90
91static void msix_set_enable(struct pci_dev *dev, int enable) 91static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -111,27 +111,14 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
111 return (1 << (1 << x)) - 1; 111 return (1 << (1 << x)) - 1;
112} 112}
113 113
114static void msix_flush_writes(struct irq_desc *desc) 114static inline __attribute_const__ u32 msi_capable_mask(u16 control)
115{ 115{
116 struct msi_desc *entry; 116 return msi_mask((control >> 1) & 7);
117}
117 118
118 entry = get_irq_desc_msi(desc); 119static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
119 BUG_ON(!entry || !entry->dev); 120{
120 switch (entry->msi_attrib.type) { 121 return msi_mask((control >> 4) & 7);
121 case PCI_CAP_ID_MSI:
122 /* nothing to do */
123 break;
124 case PCI_CAP_ID_MSIX:
125 {
126 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
127 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
128 readl(entry->mask_base + offset);
129 break;
130 }
131 default:
132 BUG();
133 break;
134 }
135} 122}
136 123
137/* 124/*
@@ -139,53 +126,72 @@ static void msix_flush_writes(struct irq_desc *desc)
139 * mask all MSI interrupts by clearing the MSI enable bit does not work 126 * mask all MSI interrupts by clearing the MSI enable bit does not work
140 * reliably as devices without an INTx disable bit will then generate a 127 * reliably as devices without an INTx disable bit will then generate a
141 * level IRQ which will never be cleared. 128 * level IRQ which will never be cleared.
142 *
143 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
144 * doesn't support MSI masking.
145 */ 129 */
146static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) 130static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
147{ 131{
148 struct msi_desc *entry; 132 u32 mask_bits = desc->masked;
149 133
150 entry = get_irq_desc_msi(desc); 134 if (!desc->msi_attrib.maskbit)
151 BUG_ON(!entry || !entry->dev); 135 return;
152 switch (entry->msi_attrib.type) { 136
153 case PCI_CAP_ID_MSI: 137 mask_bits &= ~mask;
154 if (entry->msi_attrib.maskbit) { 138 mask_bits |= flag;
155 int pos; 139 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
156 u32 mask_bits; 140 desc->masked = mask_bits;
157 141}
158 pos = (long)entry->mask_base; 142
159 pci_read_config_dword(entry->dev, pos, &mask_bits); 143/*
160 mask_bits &= ~(mask); 144 * This internal function does not flush PCI writes to the device.
161 mask_bits |= flag & mask; 145 * All users must ensure that they read from the device before either
162 pci_write_config_dword(entry->dev, pos, mask_bits); 146 * assuming that the device state is up to date, or returning out of this
163 } else { 147 * file. This saves a few milliseconds when initialising devices with lots
164 return 0; 148 * of MSI-X interrupts.
165 } 149 */
166 break; 150static void msix_mask_irq(struct msi_desc *desc, u32 flag)
167 case PCI_CAP_ID_MSIX: 151{
168 { 152 u32 mask_bits = desc->masked;
169 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 153 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
170 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 154 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
171 writel(flag, entry->mask_base + offset); 155 mask_bits &= ~1;
172 readl(entry->mask_base + offset); 156 mask_bits |= flag;
173 break; 157 writel(mask_bits, desc->mask_base + offset);
174 } 158 desc->masked = mask_bits;
175 default: 159}
176 BUG(); 160
177 break; 161static void msi_set_mask_bit(unsigned irq, u32 flag)
162{
163 struct msi_desc *desc = get_irq_msi(irq);
164
165 if (desc->msi_attrib.is_msix) {
166 msix_mask_irq(desc, flag);
167 readl(desc->mask_base); /* Flush write to device */
168 } else {
169 unsigned offset = irq - desc->dev->irq;
170 msi_mask_irq(desc, 1 << offset, flag << offset);
178 } 171 }
179 entry->msi_attrib.masked = !!flag; 172}
180 return 1; 173
174void mask_msi_irq(unsigned int irq)
175{
176 msi_set_mask_bit(irq, 1);
177}
178
179void unmask_msi_irq(unsigned int irq)
180{
181 msi_set_mask_bit(irq, 0);
181} 182}
182 183
183void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 184void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
184{ 185{
185 struct msi_desc *entry = get_irq_desc_msi(desc); 186 struct msi_desc *entry = get_irq_desc_msi(desc);
186 switch(entry->msi_attrib.type) { 187 if (entry->msi_attrib.is_msix) {
187 case PCI_CAP_ID_MSI: 188 void __iomem *base = entry->mask_base +
188 { 189 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
190
191 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
192 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
193 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
194 } else {
189 struct pci_dev *dev = entry->dev; 195 struct pci_dev *dev = entry->dev;
190 int pos = entry->msi_attrib.pos; 196 int pos = entry->msi_attrib.pos;
191 u16 data; 197 u16 data;
@@ -201,21 +207,6 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
201 pci_read_config_word(dev, msi_data_reg(pos, 0), &data); 207 pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
202 } 208 }
203 msg->data = data; 209 msg->data = data;
204 break;
205 }
206 case PCI_CAP_ID_MSIX:
207 {
208 void __iomem *base;
209 base = entry->mask_base +
210 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
211
212 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
213 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
214 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
215 break;
216 }
217 default:
218 BUG();
219 } 210 }
220} 211}
221 212
@@ -229,11 +220,25 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
229void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 220void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
230{ 221{
231 struct msi_desc *entry = get_irq_desc_msi(desc); 222 struct msi_desc *entry = get_irq_desc_msi(desc);
232 switch (entry->msi_attrib.type) { 223 if (entry->msi_attrib.is_msix) {
233 case PCI_CAP_ID_MSI: 224 void __iomem *base;
234 { 225 base = entry->mask_base +
226 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
227
228 writel(msg->address_lo,
229 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
230 writel(msg->address_hi,
231 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
232 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
233 } else {
235 struct pci_dev *dev = entry->dev; 234 struct pci_dev *dev = entry->dev;
236 int pos = entry->msi_attrib.pos; 235 int pos = entry->msi_attrib.pos;
236 u16 msgctl;
237
238 pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
239 msgctl &= ~PCI_MSI_FLAGS_QSIZE;
240 msgctl |= entry->msi_attrib.multiple << 4;
241 pci_write_config_word(dev, msi_control_reg(pos), msgctl);
237 242
238 pci_write_config_dword(dev, msi_lower_address_reg(pos), 243 pci_write_config_dword(dev, msi_lower_address_reg(pos),
239 msg->address_lo); 244 msg->address_lo);
@@ -246,23 +251,6 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
246 pci_write_config_word(dev, msi_data_reg(pos, 0), 251 pci_write_config_word(dev, msi_data_reg(pos, 0),
247 msg->data); 252 msg->data);
248 } 253 }
249 break;
250 }
251 case PCI_CAP_ID_MSIX:
252 {
253 void __iomem *base;
254 base = entry->mask_base +
255 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
256
257 writel(msg->address_lo,
258 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
259 writel(msg->address_hi,
260 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
261 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
262 break;
263 }
264 default:
265 BUG();
266 } 254 }
267 entry->msg = *msg; 255 entry->msg = *msg;
268} 256}
@@ -274,37 +262,18 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
274 write_msi_msg_desc(desc, msg); 262 write_msi_msg_desc(desc, msg);
275} 263}
276 264
277void mask_msi_irq(unsigned int irq)
278{
279 struct irq_desc *desc = irq_to_desc(irq);
280
281 msi_set_mask_bits(desc, 1, 1);
282 msix_flush_writes(desc);
283}
284
285void unmask_msi_irq(unsigned int irq)
286{
287 struct irq_desc *desc = irq_to_desc(irq);
288
289 msi_set_mask_bits(desc, 1, 0);
290 msix_flush_writes(desc);
291}
292
293static int msi_free_irqs(struct pci_dev* dev); 265static int msi_free_irqs(struct pci_dev* dev);
294 266
295static struct msi_desc* alloc_msi_entry(void) 267static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
296{ 268{
297 struct msi_desc *entry; 269 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
298 270 if (!desc)
299 entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
300 if (!entry)
301 return NULL; 271 return NULL;
302 272
303 INIT_LIST_HEAD(&entry->list); 273 INIT_LIST_HEAD(&desc->list);
304 entry->irq = 0; 274 desc->dev = dev;
305 entry->dev = NULL;
306 275
307 return entry; 276 return desc;
308} 277}
309 278
310static void pci_intx_for_msi(struct pci_dev *dev, int enable) 279static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -326,17 +295,13 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
326 pos = entry->msi_attrib.pos; 295 pos = entry->msi_attrib.pos;
327 296
328 pci_intx_for_msi(dev, 0); 297 pci_intx_for_msi(dev, 0);
329 msi_set_enable(dev, 0); 298 msi_set_enable(dev, pos, 0);
330 write_msi_msg(dev->irq, &entry->msg); 299 write_msi_msg(dev->irq, &entry->msg);
331 if (entry->msi_attrib.maskbit) {
332 struct irq_desc *desc = irq_to_desc(dev->irq);
333 msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
334 entry->msi_attrib.masked);
335 }
336 300
337 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 301 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
302 msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
338 control &= ~PCI_MSI_FLAGS_QSIZE; 303 control &= ~PCI_MSI_FLAGS_QSIZE;
339 control |= PCI_MSI_FLAGS_ENABLE; 304 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
340 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 305 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
341} 306}
342 307
@@ -348,23 +313,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
348 313
349 if (!dev->msix_enabled) 314 if (!dev->msix_enabled)
350 return; 315 return;
316 BUG_ON(list_empty(&dev->msi_list));
317 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
318 pos = entry->msi_attrib.pos;
319 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
351 320
352 /* route the table */ 321 /* route the table */
353 pci_intx_for_msi(dev, 0); 322 pci_intx_for_msi(dev, 0);
354 msix_set_enable(dev, 0); 323 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
324 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
355 325
356 list_for_each_entry(entry, &dev->msi_list, list) { 326 list_for_each_entry(entry, &dev->msi_list, list) {
357 struct irq_desc *desc = irq_to_desc(entry->irq);
358 write_msi_msg(entry->irq, &entry->msg); 327 write_msi_msg(entry->irq, &entry->msg);
359 msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); 328 msix_mask_irq(entry, entry->masked);
360 } 329 }
361 330
362 BUG_ON(list_empty(&dev->msi_list));
363 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
364 pos = entry->msi_attrib.pos;
365 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
366 control &= ~PCI_MSIX_FLAGS_MASKALL; 331 control &= ~PCI_MSIX_FLAGS_MASKALL;
367 control |= PCI_MSIX_FLAGS_ENABLE;
368 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 332 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
369} 333}
370 334
@@ -378,52 +342,48 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
378/** 342/**
379 * msi_capability_init - configure device's MSI capability structure 343 * msi_capability_init - configure device's MSI capability structure
380 * @dev: pointer to the pci_dev data structure of MSI device function 344 * @dev: pointer to the pci_dev data structure of MSI device function
345 * @nvec: number of interrupts to allocate
381 * 346 *
382 * Setup the MSI capability structure of device function with a single 347 * Setup the MSI capability structure of the device with the requested
383 * MSI irq, regardless of device function is capable of handling 348 * number of interrupts. A return value of zero indicates the successful
384 * multiple messages. A return of zero indicates the successful setup 349 * setup of an entry with the new MSI irq. A negative return value indicates
385 * of an entry zero with the new MSI irq or non-zero for otherwise. 350 * an error, and a positive return value indicates the number of interrupts
386 **/ 351 * which could have been allocated.
387static int msi_capability_init(struct pci_dev *dev) 352 */
353static int msi_capability_init(struct pci_dev *dev, int nvec)
388{ 354{
389 struct msi_desc *entry; 355 struct msi_desc *entry;
390 int pos, ret; 356 int pos, ret;
391 u16 control; 357 u16 control;
392 358 unsigned mask;
393 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
394 359
395 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 360 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
361 msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
362
396 pci_read_config_word(dev, msi_control_reg(pos), &control); 363 pci_read_config_word(dev, msi_control_reg(pos), &control);
397 /* MSI Entry Initialization */ 364 /* MSI Entry Initialization */
398 entry = alloc_msi_entry(); 365 entry = alloc_msi_entry(dev);
399 if (!entry) 366 if (!entry)
400 return -ENOMEM; 367 return -ENOMEM;
401 368
402 entry->msi_attrib.type = PCI_CAP_ID_MSI; 369 entry->msi_attrib.is_msix = 0;
403 entry->msi_attrib.is_64 = is_64bit_address(control); 370 entry->msi_attrib.is_64 = is_64bit_address(control);
404 entry->msi_attrib.entry_nr = 0; 371 entry->msi_attrib.entry_nr = 0;
405 entry->msi_attrib.maskbit = is_mask_bit_support(control); 372 entry->msi_attrib.maskbit = is_mask_bit_support(control);
406 entry->msi_attrib.masked = 1;
407 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 373 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
408 entry->msi_attrib.pos = pos; 374 entry->msi_attrib.pos = pos;
409 entry->dev = dev; 375
410 if (entry->msi_attrib.maskbit) { 376 entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
411 unsigned int base, maskbits, temp; 377 /* All MSIs are unmasked by default, Mask them all */
412 378 if (entry->msi_attrib.maskbit)
413 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 379 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
414 entry->mask_base = (void __iomem *)(long)base; 380 mask = msi_capable_mask(control);
415 381 msi_mask_irq(entry, mask, mask);
416 /* All MSIs are unmasked by default, Mask them all */ 382
417 pci_read_config_dword(dev, base, &maskbits);
418 temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
419 maskbits |= temp;
420 pci_write_config_dword(dev, base, maskbits);
421 entry->msi_attrib.maskbits_mask = temp;
422 }
423 list_add_tail(&entry->list, &dev->msi_list); 383 list_add_tail(&entry->list, &dev->msi_list);
424 384
425 /* Configure MSI capability structure */ 385 /* Configure MSI capability structure */
426 ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 386 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
427 if (ret) { 387 if (ret) {
428 msi_free_irqs(dev); 388 msi_free_irqs(dev);
429 return ret; 389 return ret;
@@ -431,7 +391,7 @@ static int msi_capability_init(struct pci_dev *dev)
431 391
432 /* Set MSI enabled bits */ 392 /* Set MSI enabled bits */
433 pci_intx_for_msi(dev, 0); 393 pci_intx_for_msi(dev, 0);
434 msi_set_enable(dev, 1); 394 msi_set_enable(dev, pos, 1);
435 dev->msi_enabled = 1; 395 dev->msi_enabled = 1;
436 396
437 dev->irq = entry->irq; 397 dev->irq = entry->irq;
@@ -459,11 +419,14 @@ static int msix_capability_init(struct pci_dev *dev,
459 u8 bir; 419 u8 bir;
460 void __iomem *base; 420 void __iomem *base;
461 421
462 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
463
464 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 422 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
423 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
424
425 /* Ensure MSI-X is disabled while it is set up */
426 control &= ~PCI_MSIX_FLAGS_ENABLE;
427 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
428
465 /* Request & Map MSI-X table region */ 429 /* Request & Map MSI-X table region */
466 pci_read_config_word(dev, msi_control_reg(pos), &control);
467 nr_entries = multi_msix_capable(control); 430 nr_entries = multi_msix_capable(control);
468 431
469 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 432 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -474,28 +437,26 @@ static int msix_capability_init(struct pci_dev *dev,
474 if (base == NULL) 437 if (base == NULL)
475 return -ENOMEM; 438 return -ENOMEM;
476 439
477 /* MSI-X Table Initialization */
478 for (i = 0; i < nvec; i++) { 440 for (i = 0; i < nvec; i++) {
479 entry = alloc_msi_entry(); 441 entry = alloc_msi_entry(dev);
480 if (!entry) 442 if (!entry)
481 break; 443 break;
482 444
483 j = entries[i].entry; 445 j = entries[i].entry;
484 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 446 entry->msi_attrib.is_msix = 1;
485 entry->msi_attrib.is_64 = 1; 447 entry->msi_attrib.is_64 = 1;
486 entry->msi_attrib.entry_nr = j; 448 entry->msi_attrib.entry_nr = j;
487 entry->msi_attrib.maskbit = 1;
488 entry->msi_attrib.masked = 1;
489 entry->msi_attrib.default_irq = dev->irq; 449 entry->msi_attrib.default_irq = dev->irq;
490 entry->msi_attrib.pos = pos; 450 entry->msi_attrib.pos = pos;
491 entry->dev = dev;
492 entry->mask_base = base; 451 entry->mask_base = base;
493 452
494 list_add_tail(&entry->list, &dev->msi_list); 453 list_add_tail(&entry->list, &dev->msi_list);
495 } 454 }
496 455
497 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 456 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
498 if (ret) { 457 if (ret < 0) {
458 /* If we had some success report the number of irqs
459 * we succeeded in setting up. */
499 int avail = 0; 460 int avail = 0;
500 list_for_each_entry(entry, &dev->msi_list, list) { 461 list_for_each_entry(entry, &dev->msi_list, list) {
501 if (entry->irq != 0) { 462 if (entry->irq != 0) {
@@ -503,27 +464,41 @@ static int msix_capability_init(struct pci_dev *dev,
503 } 464 }
504 } 465 }
505 466
506 msi_free_irqs(dev); 467 if (avail != 0)
468 ret = avail;
469 }
507 470
508 /* If we had some success report the number of irqs 471 if (ret) {
509 * we succeeded in setting up. 472 msi_free_irqs(dev);
510 */ 473 return ret;
511 if (avail == 0)
512 avail = ret;
513 return avail;
514 } 474 }
515 475
476 /*
477 * Some devices require MSI-X to be enabled before we can touch the
478 * MSI-X registers. We need to mask all the vectors to prevent
479 * interrupts coming in before they're fully set up.
480 */
481 control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
482 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
483
516 i = 0; 484 i = 0;
517 list_for_each_entry(entry, &dev->msi_list, list) { 485 list_for_each_entry(entry, &dev->msi_list, list) {
518 entries[i].vector = entry->irq; 486 entries[i].vector = entry->irq;
519 set_irq_msi(entry->irq, entry); 487 set_irq_msi(entry->irq, entry);
488 j = entries[i].entry;
489 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
490 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
491 msix_mask_irq(entry, 1);
520 i++; 492 i++;
521 } 493 }
522 /* Set MSI-X enabled bits */ 494
495 /* Set MSI-X enabled bits and unmask the function */
523 pci_intx_for_msi(dev, 0); 496 pci_intx_for_msi(dev, 0);
524 msix_set_enable(dev, 1);
525 dev->msix_enabled = 1; 497 dev->msix_enabled = 1;
526 498
499 control &= ~PCI_MSIX_FLAGS_MASKALL;
500 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
501
527 return 0; 502 return 0;
528} 503}
529 504
@@ -575,61 +550,75 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
575} 550}
576 551
577/** 552/**
578 * pci_enable_msi - configure device's MSI capability structure 553 * pci_enable_msi_block - configure device's MSI capability structure
579 * @dev: pointer to the pci_dev data structure of MSI device function 554 * @dev: device to configure
555 * @nvec: number of interrupts to configure
580 * 556 *
581 * Setup the MSI capability structure of device function with 557 * Allocate IRQs for a device with the MSI capability.
582 * a single MSI irq upon its software driver call to request for 558 * This function returns a negative errno if an error occurs. If it
583 * MSI mode enabled on its hardware device function. A return of zero 559 * is unable to allocate the number of interrupts requested, it returns
584 * indicates the successful setup of an entry zero with the new MSI 560 * the number of interrupts it might be able to allocate. If it successfully
585 * irq or non-zero for otherwise. 561 * allocates at least the number of interrupts requested, it returns 0 and
586 **/ 562 * updates the @dev's irq member to the lowest new interrupt number; the
587int pci_enable_msi(struct pci_dev* dev) 563 * other interrupt numbers allocated to this device are consecutive.
564 */
565int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
588{ 566{
589 int status; 567 int status, pos, maxvec;
568 u16 msgctl;
569
570 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
571 if (!pos)
572 return -EINVAL;
573 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
574 maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
575 if (nvec > maxvec)
576 return maxvec;
590 577
591 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 578 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
592 if (status) 579 if (status)
593 return status; 580 return status;
594 581
595 WARN_ON(!!dev->msi_enabled); 582 WARN_ON(!!dev->msi_enabled);
596 583
597 /* Check whether driver already requested for MSI-X irqs */ 584 /* Check whether driver already requested MSI-X irqs */
598 if (dev->msix_enabled) { 585 if (dev->msix_enabled) {
599 dev_info(&dev->dev, "can't enable MSI " 586 dev_info(&dev->dev, "can't enable MSI "
600 "(MSI-X already enabled)\n"); 587 "(MSI-X already enabled)\n");
601 return -EINVAL; 588 return -EINVAL;
602 } 589 }
603 status = msi_capability_init(dev); 590
591 status = msi_capability_init(dev, nvec);
604 return status; 592 return status;
605} 593}
606EXPORT_SYMBOL(pci_enable_msi); 594EXPORT_SYMBOL(pci_enable_msi_block);
607 595
608void pci_msi_shutdown(struct pci_dev* dev) 596void pci_msi_shutdown(struct pci_dev *dev)
609{ 597{
610 struct msi_desc *entry; 598 struct msi_desc *desc;
599 u32 mask;
600 u16 ctrl;
601 unsigned pos;
611 602
612 if (!pci_msi_enable || !dev || !dev->msi_enabled) 603 if (!pci_msi_enable || !dev || !dev->msi_enabled)
613 return; 604 return;
614 605
615 msi_set_enable(dev, 0); 606 BUG_ON(list_empty(&dev->msi_list));
607 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
608 pos = desc->msi_attrib.pos;
609
610 msi_set_enable(dev, pos, 0);
616 pci_intx_for_msi(dev, 1); 611 pci_intx_for_msi(dev, 1);
617 dev->msi_enabled = 0; 612 dev->msi_enabled = 0;
618 613
619 BUG_ON(list_empty(&dev->msi_list)); 614 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
620 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 615 mask = msi_capable_mask(ctrl);
621 /* Return the the pci reset with msi irqs unmasked */ 616 msi_mask_irq(desc, mask, ~mask);
622 if (entry->msi_attrib.maskbit) {
623 u32 mask = entry->msi_attrib.maskbits_mask;
624 struct irq_desc *desc = irq_to_desc(dev->irq);
625 msi_set_mask_bits(desc, mask, ~mask);
626 }
627 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
628 return;
629 617
630 /* Restore dev->irq to its default pin-assertion irq */ 618 /* Restore dev->irq to its default pin-assertion irq */
631 dev->irq = entry->msi_attrib.default_irq; 619 dev->irq = desc->msi_attrib.default_irq;
632} 620}
621
633void pci_disable_msi(struct pci_dev* dev) 622void pci_disable_msi(struct pci_dev* dev)
634{ 623{
635 struct msi_desc *entry; 624 struct msi_desc *entry;
@@ -640,7 +629,7 @@ void pci_disable_msi(struct pci_dev* dev)
640 pci_msi_shutdown(dev); 629 pci_msi_shutdown(dev);
641 630
642 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 631 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
643 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 632 if (entry->msi_attrib.is_msix)
644 return; 633 return;
645 634
646 msi_free_irqs(dev); 635 msi_free_irqs(dev);
@@ -652,18 +641,19 @@ static int msi_free_irqs(struct pci_dev* dev)
652 struct msi_desc *entry, *tmp; 641 struct msi_desc *entry, *tmp;
653 642
654 list_for_each_entry(entry, &dev->msi_list, list) { 643 list_for_each_entry(entry, &dev->msi_list, list) {
655 if (entry->irq) 644 int i, nvec;
656 BUG_ON(irq_has_action(entry->irq)); 645 if (!entry->irq)
646 continue;
647 nvec = 1 << entry->msi_attrib.multiple;
648 for (i = 0; i < nvec; i++)
649 BUG_ON(irq_has_action(entry->irq + i));
657 } 650 }
658 651
659 arch_teardown_msi_irqs(dev); 652 arch_teardown_msi_irqs(dev);
660 653
661 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 654 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
662 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 655 if (entry->msi_attrib.is_msix) {
663 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 656 msix_mask_irq(entry, 1);
664 * PCI_MSIX_ENTRY_SIZE
665 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
666
667 if (list_is_last(&entry->list, &dev->msi_list)) 657 if (list_is_last(&entry->list, &dev->msi_list))
668 iounmap(entry->mask_base); 658 iounmap(entry->mask_base);
669 } 659 }
@@ -675,6 +665,23 @@ static int msi_free_irqs(struct pci_dev* dev)
675} 665}
676 666
677/** 667/**
668 * pci_msix_table_size - return the number of device's MSI-X table entries
669 * @dev: pointer to the pci_dev data structure of MSI-X device function
670 */
671int pci_msix_table_size(struct pci_dev *dev)
672{
673 int pos;
674 u16 control;
675
676 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
677 if (!pos)
678 return 0;
679
680 pci_read_config_word(dev, msi_control_reg(pos), &control);
681 return multi_msix_capable(control);
682}
683
684/**
678 * pci_enable_msix - configure device's MSI-X capability structure 685 * pci_enable_msix - configure device's MSI-X capability structure
679 * @dev: pointer to the pci_dev data structure of MSI-X device function 686 * @dev: pointer to the pci_dev data structure of MSI-X device function
680 * @entries: pointer to an array of MSI-X entries 687 * @entries: pointer to an array of MSI-X entries
@@ -686,14 +693,13 @@ static int msi_free_irqs(struct pci_dev* dev)
686 * indicates the successful configuration of MSI-X capability structure 693 * indicates the successful configuration of MSI-X capability structure
687 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 694 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
688 * Or a return of > 0 indicates that driver request is exceeding the number 695 * Or a return of > 0 indicates that driver request is exceeding the number
689 * of irqs available. Driver should use the returned value to re-send 696 * of irqs or MSI-X vectors available. Driver should use the returned value to
690 * its request. 697 * re-send its request.
691 **/ 698 **/
692int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 699int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
693{ 700{
694 int status, pos, nr_entries; 701 int status, nr_entries;
695 int i, j; 702 int i, j;
696 u16 control;
697 703
698 if (!entries) 704 if (!entries)
699 return -EINVAL; 705 return -EINVAL;
@@ -702,11 +708,9 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
702 if (status) 708 if (status)
703 return status; 709 return status;
704 710
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 711 nr_entries = pci_msix_table_size(dev);
706 pci_read_config_word(dev, msi_control_reg(pos), &control);
707 nr_entries = multi_msix_capable(control);
708 if (nvec > nr_entries) 712 if (nvec > nr_entries)
709 return -EINVAL; 713 return nr_entries;
710 714
711 /* Check for any invalid entries */ 715 /* Check for any invalid entries */
712 for (i = 0; i < nvec; i++) { 716 for (i = 0; i < nvec; i++) {
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 3898f5237144..a0662842550b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,27 +16,15 @@
16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) 16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) 17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
18#define msi_data_reg(base, is64bit) \ 18#define msi_data_reg(base, is64bit) \
19 ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 ) 19 (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
20#define msi_mask_bits_reg(base, is64bit) \ 20#define msi_mask_reg(base, is64bit) \
21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) 21 (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
23#define multi_msi_capable(control) \
24 (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
25#define multi_msi_enable(control, num) \
26 control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
27#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 22#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
28#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 23#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
29#define msi_enable(control, num) multi_msi_enable(control, num); \
30 control |= PCI_MSI_FLAGS_ENABLE
31 24
32#define msix_table_offset_reg(base) (base + 0x04) 25#define msix_table_offset_reg(base) (base + 0x04)
33#define msix_pba_offset_reg(base) (base + 0x08) 26#define msix_pba_offset_reg(base) (base + 0x08)
34#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
35#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
36#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) 27#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
37#define multi_msix_capable msix_table_size 28#define multi_msix_capable(control) msix_table_size((control))
38#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
39#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
40#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
41 29
42#endif /* MSI_H */ 30#endif /* MSI_H */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index deea8a187eb8..ea15b0537457 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,221 +18,6 @@
18#include <linux/pci-acpi.h> 18#include <linux/pci-acpi.h>
19#include "pci.h" 19#include "pci.h"
20 20
21struct acpi_osc_data {
22 acpi_handle handle;
23 u32 support_set;
24 u32 control_set;
25 u32 control_query;
26 int is_queried;
27 struct list_head sibiling;
28};
29static LIST_HEAD(acpi_osc_data_list);
30
31struct acpi_osc_args {
32 u32 capbuf[3];
33};
34
35static DEFINE_MUTEX(pci_acpi_lock);
36
37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
38{
39 struct acpi_osc_data *data;
40
41 list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
42 if (data->handle == handle)
43 return data;
44 }
45 data = kzalloc(sizeof(*data), GFP_KERNEL);
46 if (!data)
47 return NULL;
48 INIT_LIST_HEAD(&data->sibiling);
49 data->handle = handle;
50 list_add_tail(&data->sibiling, &acpi_osc_data_list);
51 return data;
52}
53
54static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
55 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
56
57static acpi_status acpi_run_osc(acpi_handle handle,
58 struct acpi_osc_args *osc_args, u32 *retval)
59{
60 acpi_status status;
61 struct acpi_object_list input;
62 union acpi_object in_params[4];
63 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
64 union acpi_object *out_obj;
65 u32 errors, flags = osc_args->capbuf[OSC_QUERY_TYPE];
66
67 /* Setting up input parameters */
68 input.count = 4;
69 input.pointer = in_params;
70 in_params[0].type = ACPI_TYPE_BUFFER;
71 in_params[0].buffer.length = 16;
72 in_params[0].buffer.pointer = OSC_UUID;
73 in_params[1].type = ACPI_TYPE_INTEGER;
74 in_params[1].integer.value = 1;
75 in_params[2].type = ACPI_TYPE_INTEGER;
76 in_params[2].integer.value = 3;
77 in_params[3].type = ACPI_TYPE_BUFFER;
78 in_params[3].buffer.length = 12;
79 in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
80
81 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
82 if (ACPI_FAILURE(status))
83 return status;
84
85 if (!output.length)
86 return AE_NULL_OBJECT;
87
88 out_obj = output.pointer;
89 if (out_obj->type != ACPI_TYPE_BUFFER) {
90 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
91 status = AE_TYPE;
92 goto out_kfree;
93 }
94 /* Need to ignore the bit0 in result code */
95 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
96 if (errors) {
97 if (errors & OSC_REQUEST_ERROR)
98 printk(KERN_DEBUG "_OSC request fails\n");
99 if (errors & OSC_INVALID_UUID_ERROR)
100 printk(KERN_DEBUG "_OSC invalid UUID\n");
101 if (errors & OSC_INVALID_REVISION_ERROR)
102 printk(KERN_DEBUG "_OSC invalid revision\n");
103 if (errors & OSC_CAPABILITIES_MASK_ERROR) {
104 if (flags & OSC_QUERY_ENABLE)
105 goto out_success;
106 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
107 status = AE_SUPPORT;
108 goto out_kfree;
109 }
110 status = AE_ERROR;
111 goto out_kfree;
112 }
113out_success:
114 *retval = *((u32 *)(out_obj->buffer.pointer + 8));
115 status = AE_OK;
116
117out_kfree:
118 kfree(output.pointer);
119 return status;
120}
121
122static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
123{
124 acpi_status status;
125 u32 support_set, result;
126 struct acpi_osc_args osc_args;
127
128 /* do _OSC query for all possible controls */
129 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
130 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
131 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
132 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
133
134 status = acpi_run_osc(osc_data->handle, &osc_args, &result);
135 if (ACPI_SUCCESS(status)) {
136 osc_data->support_set = support_set;
137 osc_data->control_query = result;
138 osc_data->is_queried = 1;
139 }
140
141 return status;
142}
143
144/*
145 * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
146 * @flags: Bitmask of flags to support
147 *
148 * See the ACPI spec for the definition of the flags
149 */
150int pci_acpi_osc_support(acpi_handle handle, u32 flags)
151{
152 acpi_status status;
153 acpi_handle tmp;
154 struct acpi_osc_data *osc_data;
155 int rc = 0;
156
157 status = acpi_get_handle(handle, "_OSC", &tmp);
158 if (ACPI_FAILURE(status))
159 return -ENOTTY;
160
161 mutex_lock(&pci_acpi_lock);
162 osc_data = acpi_get_osc_data(handle);
163 if (!osc_data) {
164 printk(KERN_ERR "acpi osc data array is full\n");
165 rc = -ENOMEM;
166 goto out;
167 }
168
169 __acpi_query_osc(flags, osc_data);
170out:
171 mutex_unlock(&pci_acpi_lock);
172 return rc;
173}
174
175/**
176 * pci_osc_control_set - commit requested control to Firmware
177 * @handle: acpi_handle for the target ACPI object
178 * @flags: driver's requested control bits
179 *
180 * Attempt to take control from Firmware on requested control bits.
181 **/
182acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
183{
184 acpi_status status;
185 u32 control_req, control_set, result;
186 acpi_handle tmp;
187 struct acpi_osc_data *osc_data;
188 struct acpi_osc_args osc_args;
189
190 status = acpi_get_handle(handle, "_OSC", &tmp);
191 if (ACPI_FAILURE(status))
192 return status;
193
194 mutex_lock(&pci_acpi_lock);
195 osc_data = acpi_get_osc_data(handle);
196 if (!osc_data) {
197 printk(KERN_ERR "acpi osc data array is full\n");
198 status = AE_ERROR;
199 goto out;
200 }
201
202 control_req = (flags & OSC_CONTROL_MASKS);
203 if (!control_req) {
204 status = AE_TYPE;
205 goto out;
206 }
207
208 /* No need to evaluate _OSC if the control was already granted. */
209 if ((osc_data->control_set & control_req) == control_req)
210 goto out;
211
212 if (!osc_data->is_queried) {
213 status = __acpi_query_osc(osc_data->support_set, osc_data);
214 if (ACPI_FAILURE(status))
215 goto out;
216 }
217
218 if ((osc_data->control_query & control_req) != control_req) {
219 status = AE_SUPPORT;
220 goto out;
221 }
222
223 control_set = osc_data->control_set | control_req;
224 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
225 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
226 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
227 status = acpi_run_osc(handle, &osc_args, &result);
228 if (ACPI_SUCCESS(status))
229 osc_data->control_set = result;
230out:
231 mutex_unlock(&pci_acpi_lock);
232 return status;
233}
234EXPORT_SYMBOL(pci_osc_control_set);
235
236/* 21/*
237 * _SxD returns the D-state with the highest power 22 * _SxD returns the D-state with the highest power
238 * (lowest D-state number) supported in the S-state "x". 23 * (lowest D-state number) supported in the S-state "x".
@@ -386,12 +171,12 @@ static int __init acpi_pci_init(void)
386{ 171{
387 int ret; 172 int ret;
388 173
389 if (acpi_gbl_FADT.boot_flags & BAF_MSI_NOT_SUPPORTED) { 174 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
390 printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); 175 printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n");
391 pci_no_msi(); 176 pci_no_msi();
392 } 177 }
393 178
394 if (acpi_gbl_FADT.boot_flags & BAF_PCIE_ASPM_CONTROL) { 179 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
395 printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); 180 printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
396 pcie_no_aspm(); 181 pcie_no_aspm();
397 } 182 }
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 93eac1423585..d76c4c85367e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -99,6 +99,52 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
99} 99}
100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
101 101
102/**
103 * store_remove_id - remove a PCI device ID from this driver
104 * @driver: target device driver
105 * @buf: buffer for scanning device ID data
106 * @count: input size
107 *
108 * Removes a dynamic pci device ID to this driver.
109 */
110static ssize_t
111store_remove_id(struct device_driver *driver, const char *buf, size_t count)
112{
113 struct pci_dynid *dynid, *n;
114 struct pci_driver *pdrv = to_pci_driver(driver);
115 __u32 vendor, device, subvendor = PCI_ANY_ID,
116 subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
117 int fields = 0;
118 int retval = -ENODEV;
119
120 fields = sscanf(buf, "%x %x %x %x %x %x",
121 &vendor, &device, &subvendor, &subdevice,
122 &class, &class_mask);
123 if (fields < 2)
124 return -EINVAL;
125
126 spin_lock(&pdrv->dynids.lock);
127 list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
128 struct pci_device_id *id = &dynid->id;
129 if ((id->vendor == vendor) &&
130 (id->device == device) &&
131 (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
132 (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
133 !((id->class ^ class) & class_mask)) {
134 list_del(&dynid->node);
135 kfree(dynid);
136 retval = 0;
137 break;
138 }
139 }
140 spin_unlock(&pdrv->dynids.lock);
141
142 if (retval)
143 return retval;
144 return count;
145}
146static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
147
102static void 148static void
103pci_free_dynids(struct pci_driver *drv) 149pci_free_dynids(struct pci_driver *drv)
104{ 150{
@@ -125,6 +171,20 @@ static void pci_remove_newid_file(struct pci_driver *drv)
125{ 171{
126 driver_remove_file(&drv->driver, &driver_attr_new_id); 172 driver_remove_file(&drv->driver, &driver_attr_new_id);
127} 173}
174
175static int
176pci_create_removeid_file(struct pci_driver *drv)
177{
178 int error = 0;
179 if (drv->probe != NULL)
180 error = driver_create_file(&drv->driver,&driver_attr_remove_id);
181 return error;
182}
183
184static void pci_remove_removeid_file(struct pci_driver *drv)
185{
186 driver_remove_file(&drv->driver, &driver_attr_remove_id);
187}
128#else /* !CONFIG_HOTPLUG */ 188#else /* !CONFIG_HOTPLUG */
129static inline void pci_free_dynids(struct pci_driver *drv) {} 189static inline void pci_free_dynids(struct pci_driver *drv) {}
130static inline int pci_create_newid_file(struct pci_driver *drv) 190static inline int pci_create_newid_file(struct pci_driver *drv)
@@ -132,6 +192,11 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
132 return 0; 192 return 0;
133} 193}
134static inline void pci_remove_newid_file(struct pci_driver *drv) {} 194static inline void pci_remove_newid_file(struct pci_driver *drv) {}
195static inline int pci_create_removeid_file(struct pci_driver *drv)
196{
197 return 0;
198}
199static inline void pci_remove_removeid_file(struct pci_driver *drv) {}
135#endif 200#endif
136 201
137/** 202/**
@@ -212,10 +277,9 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
212 node = dev_to_node(&dev->dev); 277 node = dev_to_node(&dev->dev);
213 if (node >= 0) { 278 if (node >= 0) {
214 int cpu; 279 int cpu;
215 node_to_cpumask_ptr(nodecpumask, node);
216 280
217 get_online_cpus(); 281 get_online_cpus();
218 cpu = cpumask_any_and(nodecpumask, cpu_online_mask); 282 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
219 if (cpu < nr_cpu_ids) 283 if (cpu < nr_cpu_ids)
220 error = work_on_cpu(cpu, local_pci_probe, &ddi); 284 error = work_on_cpu(cpu, local_pci_probe, &ddi);
221 else 285 else
@@ -352,53 +416,60 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
352{ 416{
353 struct pci_dev * pci_dev = to_pci_dev(dev); 417 struct pci_dev * pci_dev = to_pci_dev(dev);
354 struct pci_driver * drv = pci_dev->driver; 418 struct pci_driver * drv = pci_dev->driver;
355 int i = 0; 419
420 pci_dev->state_saved = false;
356 421
357 if (drv && drv->suspend) { 422 if (drv && drv->suspend) {
358 pci_power_t prev = pci_dev->current_state; 423 pci_power_t prev = pci_dev->current_state;
424 int error;
359 425
360 pci_dev->state_saved = false; 426 error = drv->suspend(pci_dev, state);
361 427 suspend_report_result(drv->suspend, error);
362 i = drv->suspend(pci_dev, state); 428 if (error)
363 suspend_report_result(drv->suspend, i); 429 return error;
364 if (i)
365 return i;
366
367 if (pci_dev->state_saved)
368 goto Fixup;
369 430
370 if (pci_dev->current_state != PCI_D0 431 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
371 && pci_dev->current_state != PCI_UNKNOWN) { 432 && pci_dev->current_state != PCI_UNKNOWN) {
372 WARN_ONCE(pci_dev->current_state != prev, 433 WARN_ONCE(pci_dev->current_state != prev,
373 "PCI PM: Device state not saved by %pF\n", 434 "PCI PM: Device state not saved by %pF\n",
374 drv->suspend); 435 drv->suspend);
375 goto Fixup;
376 } 436 }
377 } 437 }
378 438
379 pci_save_state(pci_dev);
380 /*
381 * This is for compatibility with existing code with legacy PM support.
382 */
383 pci_pm_set_unknown_state(pci_dev);
384
385 Fixup:
386 pci_fixup_device(pci_fixup_suspend, pci_dev); 439 pci_fixup_device(pci_fixup_suspend, pci_dev);
387 440
388 return i; 441 return 0;
389} 442}
390 443
391static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) 444static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
392{ 445{
393 struct pci_dev * pci_dev = to_pci_dev(dev); 446 struct pci_dev * pci_dev = to_pci_dev(dev);
394 struct pci_driver * drv = pci_dev->driver; 447 struct pci_driver * drv = pci_dev->driver;
395 int i = 0;
396 448
397 if (drv && drv->suspend_late) { 449 if (drv && drv->suspend_late) {
398 i = drv->suspend_late(pci_dev, state); 450 pci_power_t prev = pci_dev->current_state;
399 suspend_report_result(drv->suspend_late, i); 451 int error;
452
453 error = drv->suspend_late(pci_dev, state);
454 suspend_report_result(drv->suspend_late, error);
455 if (error)
456 return error;
457
458 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
459 && pci_dev->current_state != PCI_UNKNOWN) {
460 WARN_ONCE(pci_dev->current_state != prev,
461 "PCI PM: Device state not saved by %pF\n",
462 drv->suspend_late);
463 return 0;
464 }
400 } 465 }
401 return i; 466
467 if (!pci_dev->state_saved)
468 pci_save_state(pci_dev);
469
470 pci_pm_set_unknown_state(pci_dev);
471
472 return 0;
402} 473}
403 474
404static int pci_legacy_resume_early(struct device *dev) 475static int pci_legacy_resume_early(struct device *dev)
@@ -423,6 +494,23 @@ static int pci_legacy_resume(struct device *dev)
423 494
424/* Auxiliary functions used by the new power management framework */ 495/* Auxiliary functions used by the new power management framework */
425 496
497/**
498 * pci_restore_standard_config - restore standard config registers of PCI device
499 * @pci_dev: PCI device to handle
500 */
501static int pci_restore_standard_config(struct pci_dev *pci_dev)
502{
503 pci_update_current_state(pci_dev, PCI_UNKNOWN);
504
505 if (pci_dev->current_state != PCI_D0) {
506 int error = pci_set_power_state(pci_dev, PCI_D0);
507 if (error)
508 return error;
509 }
510
511 return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0;
512}
513
426static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 514static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
427{ 515{
428 pci_restore_standard_config(pci_dev); 516 pci_restore_standard_config(pci_dev);
@@ -443,7 +531,6 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
443 /* Disable non-bridge devices without PM support */ 531 /* Disable non-bridge devices without PM support */
444 if (!pci_is_bridge(pci_dev)) 532 if (!pci_is_bridge(pci_dev))
445 pci_disable_enabled_device(pci_dev); 533 pci_disable_enabled_device(pci_dev);
446 pci_save_state(pci_dev);
447} 534}
448 535
449static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) 536static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
@@ -493,13 +580,13 @@ static int pci_pm_suspend(struct device *dev)
493 if (pci_has_legacy_pm_support(pci_dev)) 580 if (pci_has_legacy_pm_support(pci_dev))
494 return pci_legacy_suspend(dev, PMSG_SUSPEND); 581 return pci_legacy_suspend(dev, PMSG_SUSPEND);
495 582
583 pci_dev->state_saved = false;
584
496 if (!pm) { 585 if (!pm) {
497 pci_pm_default_suspend(pci_dev); 586 pci_pm_default_suspend(pci_dev);
498 goto Fixup; 587 goto Fixup;
499 } 588 }
500 589
501 pci_dev->state_saved = false;
502
503 if (pm->suspend) { 590 if (pm->suspend) {
504 pci_power_t prev = pci_dev->current_state; 591 pci_power_t prev = pci_dev->current_state;
505 int error; 592 int error;
@@ -509,24 +596,14 @@ static int pci_pm_suspend(struct device *dev)
509 if (error) 596 if (error)
510 return error; 597 return error;
511 598
512 if (pci_dev->state_saved) 599 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
513 goto Fixup;
514
515 if (pci_dev->current_state != PCI_D0
516 && pci_dev->current_state != PCI_UNKNOWN) { 600 && pci_dev->current_state != PCI_UNKNOWN) {
517 WARN_ONCE(pci_dev->current_state != prev, 601 WARN_ONCE(pci_dev->current_state != prev,
518 "PCI PM: State of device not saved by %pF\n", 602 "PCI PM: State of device not saved by %pF\n",
519 pm->suspend); 603 pm->suspend);
520 goto Fixup;
521 } 604 }
522 } 605 }
523 606
524 if (!pci_dev->state_saved) {
525 pci_save_state(pci_dev);
526 if (!pci_is_bridge(pci_dev))
527 pci_prepare_to_sleep(pci_dev);
528 }
529
530 Fixup: 607 Fixup:
531 pci_fixup_device(pci_fixup_suspend, pci_dev); 608 pci_fixup_device(pci_fixup_suspend, pci_dev);
532 609
@@ -536,21 +613,43 @@ static int pci_pm_suspend(struct device *dev)
536static int pci_pm_suspend_noirq(struct device *dev) 613static int pci_pm_suspend_noirq(struct device *dev)
537{ 614{
538 struct pci_dev *pci_dev = to_pci_dev(dev); 615 struct pci_dev *pci_dev = to_pci_dev(dev);
539 struct device_driver *drv = dev->driver; 616 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
540 int error = 0;
541 617
542 if (pci_has_legacy_pm_support(pci_dev)) 618 if (pci_has_legacy_pm_support(pci_dev))
543 return pci_legacy_suspend_late(dev, PMSG_SUSPEND); 619 return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
544 620
545 if (drv && drv->pm && drv->pm->suspend_noirq) { 621 if (!pm) {
546 error = drv->pm->suspend_noirq(dev); 622 pci_save_state(pci_dev);
547 suspend_report_result(drv->pm->suspend_noirq, error); 623 return 0;
548 } 624 }
549 625
550 if (!error) 626 if (pm->suspend_noirq) {
551 pci_pm_set_unknown_state(pci_dev); 627 pci_power_t prev = pci_dev->current_state;
628 int error;
552 629
553 return error; 630 error = pm->suspend_noirq(dev);
631 suspend_report_result(pm->suspend_noirq, error);
632 if (error)
633 return error;
634
635 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
636 && pci_dev->current_state != PCI_UNKNOWN) {
637 WARN_ONCE(pci_dev->current_state != prev,
638 "PCI PM: State of device not saved by %pF\n",
639 pm->suspend_noirq);
640 return 0;
641 }
642 }
643
644 if (!pci_dev->state_saved) {
645 pci_save_state(pci_dev);
646 if (!pci_is_bridge(pci_dev))
647 pci_prepare_to_sleep(pci_dev);
648 }
649
650 pci_pm_set_unknown_state(pci_dev);
651
652 return 0;
554} 653}
555 654
556static int pci_pm_resume_noirq(struct device *dev) 655static int pci_pm_resume_noirq(struct device *dev)
@@ -617,13 +716,13 @@ static int pci_pm_freeze(struct device *dev)
617 if (pci_has_legacy_pm_support(pci_dev)) 716 if (pci_has_legacy_pm_support(pci_dev))
618 return pci_legacy_suspend(dev, PMSG_FREEZE); 717 return pci_legacy_suspend(dev, PMSG_FREEZE);
619 718
719 pci_dev->state_saved = false;
720
620 if (!pm) { 721 if (!pm) {
621 pci_pm_default_suspend(pci_dev); 722 pci_pm_default_suspend(pci_dev);
622 return 0; 723 return 0;
623 } 724 }
624 725
625 pci_dev->state_saved = false;
626
627 if (pm->freeze) { 726 if (pm->freeze) {
628 int error; 727 int error;
629 728
@@ -633,9 +732,6 @@ static int pci_pm_freeze(struct device *dev)
633 return error; 732 return error;
634 } 733 }
635 734
636 if (!pci_dev->state_saved)
637 pci_save_state(pci_dev);
638
639 return 0; 735 return 0;
640} 736}
641 737
@@ -643,20 +739,25 @@ static int pci_pm_freeze_noirq(struct device *dev)
643{ 739{
644 struct pci_dev *pci_dev = to_pci_dev(dev); 740 struct pci_dev *pci_dev = to_pci_dev(dev);
645 struct device_driver *drv = dev->driver; 741 struct device_driver *drv = dev->driver;
646 int error = 0;
647 742
648 if (pci_has_legacy_pm_support(pci_dev)) 743 if (pci_has_legacy_pm_support(pci_dev))
649 return pci_legacy_suspend_late(dev, PMSG_FREEZE); 744 return pci_legacy_suspend_late(dev, PMSG_FREEZE);
650 745
651 if (drv && drv->pm && drv->pm->freeze_noirq) { 746 if (drv && drv->pm && drv->pm->freeze_noirq) {
747 int error;
748
652 error = drv->pm->freeze_noirq(dev); 749 error = drv->pm->freeze_noirq(dev);
653 suspend_report_result(drv->pm->freeze_noirq, error); 750 suspend_report_result(drv->pm->freeze_noirq, error);
751 if (error)
752 return error;
654 } 753 }
655 754
656 if (!error) 755 if (!pci_dev->state_saved)
657 pci_pm_set_unknown_state(pci_dev); 756 pci_save_state(pci_dev);
658 757
659 return error; 758 pci_pm_set_unknown_state(pci_dev);
759
760 return 0;
660} 761}
661 762
662static int pci_pm_thaw_noirq(struct device *dev) 763static int pci_pm_thaw_noirq(struct device *dev)
@@ -699,46 +800,56 @@ static int pci_pm_poweroff(struct device *dev)
699{ 800{
700 struct pci_dev *pci_dev = to_pci_dev(dev); 801 struct pci_dev *pci_dev = to_pci_dev(dev);
701 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 802 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
702 int error = 0;
703 803
704 if (pci_has_legacy_pm_support(pci_dev)) 804 if (pci_has_legacy_pm_support(pci_dev))
705 return pci_legacy_suspend(dev, PMSG_HIBERNATE); 805 return pci_legacy_suspend(dev, PMSG_HIBERNATE);
706 806
807 pci_dev->state_saved = false;
808
707 if (!pm) { 809 if (!pm) {
708 pci_pm_default_suspend(pci_dev); 810 pci_pm_default_suspend(pci_dev);
709 goto Fixup; 811 goto Fixup;
710 } 812 }
711 813
712 pci_dev->state_saved = false;
713
714 if (pm->poweroff) { 814 if (pm->poweroff) {
815 int error;
816
715 error = pm->poweroff(dev); 817 error = pm->poweroff(dev);
716 suspend_report_result(pm->poweroff, error); 818 suspend_report_result(pm->poweroff, error);
819 if (error)
820 return error;
717 } 821 }
718 822
719 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
720 pci_prepare_to_sleep(pci_dev);
721
722 Fixup: 823 Fixup:
723 pci_fixup_device(pci_fixup_suspend, pci_dev); 824 pci_fixup_device(pci_fixup_suspend, pci_dev);
724 825
725 return error; 826 return 0;
726} 827}
727 828
728static int pci_pm_poweroff_noirq(struct device *dev) 829static int pci_pm_poweroff_noirq(struct device *dev)
729{ 830{
831 struct pci_dev *pci_dev = to_pci_dev(dev);
730 struct device_driver *drv = dev->driver; 832 struct device_driver *drv = dev->driver;
731 int error = 0;
732 833
733 if (pci_has_legacy_pm_support(to_pci_dev(dev))) 834 if (pci_has_legacy_pm_support(to_pci_dev(dev)))
734 return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); 835 return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
735 836
736 if (drv && drv->pm && drv->pm->poweroff_noirq) { 837 if (!drv || !drv->pm)
838 return 0;
839
840 if (drv->pm->poweroff_noirq) {
841 int error;
842
737 error = drv->pm->poweroff_noirq(dev); 843 error = drv->pm->poweroff_noirq(dev);
738 suspend_report_result(drv->pm->poweroff_noirq, error); 844 suspend_report_result(drv->pm->poweroff_noirq, error);
845 if (error)
846 return error;
739 } 847 }
740 848
741 return error; 849 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
850 pci_prepare_to_sleep(pci_dev);
851
852 return 0;
742} 853}
743 854
744static int pci_pm_restore_noirq(struct device *dev) 855static int pci_pm_restore_noirq(struct device *dev)
@@ -852,13 +963,23 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
852 /* register with core */ 963 /* register with core */
853 error = driver_register(&drv->driver); 964 error = driver_register(&drv->driver);
854 if (error) 965 if (error)
855 return error; 966 goto out;
856 967
857 error = pci_create_newid_file(drv); 968 error = pci_create_newid_file(drv);
858 if (error) 969 if (error)
859 driver_unregister(&drv->driver); 970 goto out_newid;
860 971
972 error = pci_create_removeid_file(drv);
973 if (error)
974 goto out_removeid;
975out:
861 return error; 976 return error;
977
978out_removeid:
979 pci_remove_newid_file(drv);
980out_newid:
981 driver_unregister(&drv->driver);
982 goto out;
862} 983}
863 984
864/** 985/**
@@ -874,6 +995,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
874void 995void
875pci_unregister_driver(struct pci_driver *drv) 996pci_unregister_driver(struct pci_driver *drv)
876{ 997{
998 pci_remove_removeid_file(drv);
877 pci_remove_newid_file(drv); 999 pci_remove_newid_file(drv);
878 driver_unregister(&drv->driver); 1000 driver_unregister(&drv->driver);
879 pci_free_dynids(drv); 1001 pci_free_dynids(drv);
@@ -973,6 +1095,7 @@ struct bus_type pci_bus_type = {
973 .remove = pci_device_remove, 1095 .remove = pci_device_remove,
974 .shutdown = pci_device_shutdown, 1096 .shutdown = pci_device_shutdown,
975 .dev_attrs = pci_dev_attrs, 1097 .dev_attrs = pci_dev_attrs,
1098 .bus_attrs = pci_bus_attrs,
976 .pm = PCI_PM_OPS_PTR, 1099 .pm = PCI_PM_OPS_PTR,
977}; 1100};
978 1101
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index dfc4e0ddf241..85ebd02a64a7 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -148,7 +148,7 @@ static ssize_t is_enabled_store(struct device *dev,
148 return -EPERM; 148 return -EPERM;
149 149
150 if (!val) { 150 if (!val) {
151 if (atomic_read(&pdev->enable_cnt) != 0) 151 if (pci_is_enabled(pdev))
152 pci_disable_device(pdev); 152 pci_disable_device(pdev);
153 else 153 else
154 result = -EIO; 154 result = -EIO;
@@ -219,6 +219,79 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
219 return count; 219 return count;
220} 220}
221 221
222#ifdef CONFIG_HOTPLUG
223static DEFINE_MUTEX(pci_remove_rescan_mutex);
224static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
225 size_t count)
226{
227 unsigned long val;
228 struct pci_bus *b = NULL;
229
230 if (strict_strtoul(buf, 0, &val) < 0)
231 return -EINVAL;
232
233 if (val) {
234 mutex_lock(&pci_remove_rescan_mutex);
235 while ((b = pci_find_next_bus(b)) != NULL)
236 pci_rescan_bus(b);
237 mutex_unlock(&pci_remove_rescan_mutex);
238 }
239 return count;
240}
241
242struct bus_attribute pci_bus_attrs[] = {
243 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
244 __ATTR_NULL
245};
246
247static ssize_t
248dev_rescan_store(struct device *dev, struct device_attribute *attr,
249 const char *buf, size_t count)
250{
251 unsigned long val;
252 struct pci_dev *pdev = to_pci_dev(dev);
253
254 if (strict_strtoul(buf, 0, &val) < 0)
255 return -EINVAL;
256
257 if (val) {
258 mutex_lock(&pci_remove_rescan_mutex);
259 pci_rescan_bus(pdev->bus);
260 mutex_unlock(&pci_remove_rescan_mutex);
261 }
262 return count;
263}
264
265static void remove_callback(struct device *dev)
266{
267 struct pci_dev *pdev = to_pci_dev(dev);
268
269 mutex_lock(&pci_remove_rescan_mutex);
270 pci_remove_bus_device(pdev);
271 mutex_unlock(&pci_remove_rescan_mutex);
272}
273
274static ssize_t
275remove_store(struct device *dev, struct device_attribute *dummy,
276 const char *buf, size_t count)
277{
278 int ret = 0;
279 unsigned long val;
280
281 if (strict_strtoul(buf, 0, &val) < 0)
282 return -EINVAL;
283
284 /* An attribute cannot be unregistered by one of its own methods,
285 * so we have to use this roundabout approach.
286 */
287 if (val)
288 ret = device_schedule_callback(dev, remove_callback);
289 if (ret)
290 count = ret;
291 return count;
292}
293#endif
294
222struct device_attribute pci_dev_attrs[] = { 295struct device_attribute pci_dev_attrs[] = {
223 __ATTR_RO(resource), 296 __ATTR_RO(resource),
224 __ATTR_RO(vendor), 297 __ATTR_RO(vendor),
@@ -237,10 +310,25 @@ struct device_attribute pci_dev_attrs[] = {
237 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), 310 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
238 broken_parity_status_show,broken_parity_status_store), 311 broken_parity_status_show,broken_parity_status_store),
239 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store), 312 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
313#ifdef CONFIG_HOTPLUG
314 __ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
315 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
316#endif
240 __ATTR_NULL, 317 __ATTR_NULL,
241}; 318};
242 319
243static ssize_t 320static ssize_t
321boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
322{
323 struct pci_dev *pdev = to_pci_dev(dev);
324
325 return sprintf(buf, "%u\n",
326 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
327 IORESOURCE_ROM_SHADOW));
328}
329struct device_attribute vga_attr = __ATTR_RO(boot_vga);
330
331static ssize_t
244pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, 332pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
245 char *buf, loff_t off, size_t count) 333 char *buf, loff_t off, size_t count)
246{ 334{
@@ -404,6 +492,7 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
404/** 492/**
405 * pci_read_legacy_io - read byte(s) from legacy I/O port space 493 * pci_read_legacy_io - read byte(s) from legacy I/O port space
406 * @kobj: kobject corresponding to file to read from 494 * @kobj: kobject corresponding to file to read from
495 * @bin_attr: struct bin_attribute for this file
407 * @buf: buffer to store results 496 * @buf: buffer to store results
408 * @off: offset into legacy I/O port space 497 * @off: offset into legacy I/O port space
409 * @count: number of bytes to read 498 * @count: number of bytes to read
@@ -429,6 +518,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
429/** 518/**
430 * pci_write_legacy_io - write byte(s) to legacy I/O port space 519 * pci_write_legacy_io - write byte(s) to legacy I/O port space
431 * @kobj: kobject corresponding to file to read from 520 * @kobj: kobject corresponding to file to read from
521 * @bin_attr: struct bin_attribute for this file
432 * @buf: buffer containing value to be written 522 * @buf: buffer containing value to be written
433 * @off: offset into legacy I/O port space 523 * @off: offset into legacy I/O port space
434 * @count: number of bytes to write 524 * @count: number of bytes to write
@@ -493,6 +583,19 @@ pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
493} 583}
494 584
495/** 585/**
586 * pci_adjust_legacy_attr - adjustment of legacy file attributes
587 * @b: bus to create files under
588 * @mmap_type: I/O port or memory
589 *
590 * Stub implementation. Can be overridden by arch if necessary.
591 */
592void __weak
593pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
594{
595 return;
596}
597
598/**
496 * pci_create_legacy_files - create legacy I/O port and memory files 599 * pci_create_legacy_files - create legacy I/O port and memory files
497 * @b: bus to create files under 600 * @b: bus to create files under
498 * 601 *
@@ -518,6 +621,7 @@ void pci_create_legacy_files(struct pci_bus *b)
518 b->legacy_io->read = pci_read_legacy_io; 621 b->legacy_io->read = pci_read_legacy_io;
519 b->legacy_io->write = pci_write_legacy_io; 622 b->legacy_io->write = pci_write_legacy_io;
520 b->legacy_io->mmap = pci_mmap_legacy_io; 623 b->legacy_io->mmap = pci_mmap_legacy_io;
624 pci_adjust_legacy_attr(b, pci_mmap_io);
521 error = device_create_bin_file(&b->dev, b->legacy_io); 625 error = device_create_bin_file(&b->dev, b->legacy_io);
522 if (error) 626 if (error)
523 goto legacy_io_err; 627 goto legacy_io_err;
@@ -528,6 +632,7 @@ void pci_create_legacy_files(struct pci_bus *b)
528 b->legacy_mem->size = 1024*1024; 632 b->legacy_mem->size = 1024*1024;
529 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 633 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
530 b->legacy_mem->mmap = pci_mmap_legacy_mem; 634 b->legacy_mem->mmap = pci_mmap_legacy_mem;
635 pci_adjust_legacy_attr(b, pci_mmap_mem);
531 error = device_create_bin_file(&b->dev, b->legacy_mem); 636 error = device_create_bin_file(&b->dev, b->legacy_mem);
532 if (error) 637 if (error)
533 goto legacy_mem_err; 638 goto legacy_mem_err;
@@ -630,9 +735,9 @@ pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
630 735
631/** 736/**
632 * pci_remove_resource_files - cleanup resource files 737 * pci_remove_resource_files - cleanup resource files
633 * @dev: dev to cleanup 738 * @pdev: dev to cleanup
634 * 739 *
635 * If we created resource files for @dev, remove them from sysfs and 740 * If we created resource files for @pdev, remove them from sysfs and
636 * free their resources. 741 * free their resources.
637 */ 742 */
638static void 743static void
@@ -690,9 +795,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
690 795
691/** 796/**
692 * pci_create_resource_files - create resource files in sysfs for @dev 797 * pci_create_resource_files - create resource files in sysfs for @dev
693 * @dev: dev in question 798 * @pdev: dev in question
694 * 799 *
695 * Walk the resources in @dev creating files for each resource available. 800 * Walk the resources in @pdev creating files for each resource available.
696 */ 801 */
697static int pci_create_resource_files(struct pci_dev *pdev) 802static int pci_create_resource_files(struct pci_dev *pdev)
698{ 803{
@@ -719,13 +824,14 @@ static int pci_create_resource_files(struct pci_dev *pdev)
719 return 0; 824 return 0;
720} 825}
721#else /* !HAVE_PCI_MMAP */ 826#else /* !HAVE_PCI_MMAP */
722static inline int pci_create_resource_files(struct pci_dev *dev) { return 0; } 827int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
723static inline void pci_remove_resource_files(struct pci_dev *dev) { return; } 828void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
724#endif /* HAVE_PCI_MMAP */ 829#endif /* HAVE_PCI_MMAP */
725 830
726/** 831/**
727 * pci_write_rom - used to enable access to the PCI ROM display 832 * pci_write_rom - used to enable access to the PCI ROM display
728 * @kobj: kernel object handle 833 * @kobj: kernel object handle
834 * @bin_attr: struct bin_attribute for this file
729 * @buf: user input 835 * @buf: user input
730 * @off: file offset 836 * @off: file offset
731 * @count: number of byte in input 837 * @count: number of byte in input
@@ -749,6 +855,7 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
749/** 855/**
750 * pci_read_rom - read a PCI ROM 856 * pci_read_rom - read a PCI ROM
751 * @kobj: kernel object handle 857 * @kobj: kernel object handle
858 * @bin_attr: struct bin_attribute for this file
752 * @buf: where to put the data we read from the ROM 859 * @buf: where to put the data we read from the ROM
753 * @off: file offset 860 * @off: file offset
754 * @count: number of bytes to read 861 * @count: number of bytes to read
@@ -884,18 +991,27 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
884 pdev->rom_attr = attr; 991 pdev->rom_attr = attr;
885 } 992 }
886 993
994 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
995 retval = device_create_file(&pdev->dev, &vga_attr);
996 if (retval)
997 goto err_rom_file;
998 }
999
887 /* add platform-specific attributes */ 1000 /* add platform-specific attributes */
888 retval = pcibios_add_platform_entries(pdev); 1001 retval = pcibios_add_platform_entries(pdev);
889 if (retval) 1002 if (retval)
890 goto err_rom_file; 1003 goto err_vga_file;
891 1004
892 /* add sysfs entries for various capabilities */ 1005 /* add sysfs entries for various capabilities */
893 retval = pci_create_capabilities_sysfs(pdev); 1006 retval = pci_create_capabilities_sysfs(pdev);
894 if (retval) 1007 if (retval)
895 goto err_rom_file; 1008 goto err_vga_file;
896 1009
897 return 0; 1010 return 0;
898 1011
1012err_vga_file:
1013 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
1014 device_remove_file(&pdev->dev, &vga_attr);
899err_rom_file: 1015err_rom_file:
900 if (rom_size) { 1016 if (rom_size) {
901 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 1017 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6d6120007af4..6c93af5ced18 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -20,8 +20,15 @@
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include <linux/device.h>
24#include <asm/setup.h>
23#include "pci.h" 25#include "pci.h"
24 26
27const char *pci_power_names[] = {
28 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
29};
30EXPORT_SYMBOL_GPL(pci_power_names);
31
25unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 32unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
26 33
27#ifdef CONFIG_PCI_DOMAINS 34#ifdef CONFIG_PCI_DOMAINS
@@ -426,7 +433,6 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
426 * given PCI device 433 * given PCI device
427 * @dev: PCI device to handle. 434 * @dev: PCI device to handle.
428 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 435 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
429 * @wait: If 'true', wait for the device to change its power state
430 * 436 *
431 * RETURN VALUE: 437 * RETURN VALUE:
432 * -EINVAL if the requested state is invalid. 438 * -EINVAL if the requested state is invalid.
@@ -435,12 +441,15 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
435 * 0 if device already is in the requested state. 441 * 0 if device already is in the requested state.
436 * 0 if device's power state has been successfully changed. 442 * 0 if device's power state has been successfully changed.
437 */ 443 */
438static int 444static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
439pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
440{ 445{
441 u16 pmcsr; 446 u16 pmcsr;
442 bool need_restore = false; 447 bool need_restore = false;
443 448
449 /* Check if we're already there */
450 if (dev->current_state == state)
451 return 0;
452
444 if (!dev->pm_cap) 453 if (!dev->pm_cap)
445 return -EIO; 454 return -EIO;
446 455
@@ -451,10 +460,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
451 * Can enter D0 from any state, but if we can only go deeper 460 * Can enter D0 from any state, but if we can only go deeper
452 * to sleep if we're already in a low power state 461 * to sleep if we're already in a low power state
453 */ 462 */
454 if (dev->current_state == state) { 463 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
455 /* we're already there */
456 return 0;
457 } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
458 && dev->current_state > state) { 464 && dev->current_state > state) {
459 dev_err(&dev->dev, "invalid power transition " 465 dev_err(&dev->dev, "invalid power transition "
460 "(from state %d to %d)\n", dev->current_state, state); 466 "(from state %d to %d)\n", dev->current_state, state);
@@ -479,12 +485,12 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
479 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 485 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
480 pmcsr |= state; 486 pmcsr |= state;
481 break; 487 break;
488 case PCI_D3hot:
489 case PCI_D3cold:
482 case PCI_UNKNOWN: /* Boot-up */ 490 case PCI_UNKNOWN: /* Boot-up */
483 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 491 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
484 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) { 492 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
485 need_restore = true; 493 need_restore = true;
486 wait = true;
487 }
488 /* Fall-through: force to D0 */ 494 /* Fall-through: force to D0 */
489 default: 495 default:
490 pmcsr = 0; 496 pmcsr = 0;
@@ -494,9 +500,6 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
494 /* enter specified state */ 500 /* enter specified state */
495 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 501 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
496 502
497 if (!wait)
498 return 0;
499
500 /* Mandatory power management transition delays */ 503 /* Mandatory power management transition delays */
501 /* see PCI PM 1.1 5.6.1 table 18 */ 504 /* see PCI PM 1.1 5.6.1 table 18 */
502 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 505 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
@@ -521,7 +524,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
521 if (need_restore) 524 if (need_restore)
522 pci_restore_bars(dev); 525 pci_restore_bars(dev);
523 526
524 if (wait && dev->bus->self) 527 if (dev->bus->self)
525 pcie_aspm_pm_state_change(dev->bus->self); 528 pcie_aspm_pm_state_change(dev->bus->self);
526 529
527 return 0; 530 return 0;
@@ -546,11 +549,59 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
546} 549}
547 550
548/** 551/**
552 * pci_platform_power_transition - Use platform to change device power state
553 * @dev: PCI device to handle.
554 * @state: State to put the device into.
555 */
556static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
557{
558 int error;
559
560 if (platform_pci_power_manageable(dev)) {
561 error = platform_pci_set_power_state(dev, state);
562 if (!error)
563 pci_update_current_state(dev, state);
564 } else {
565 error = -ENODEV;
566 /* Fall back to PCI_D0 if native PM is not supported */
567 if (!dev->pm_cap)
568 dev->current_state = PCI_D0;
569 }
570
571 return error;
572}
573
574/**
575 * __pci_start_power_transition - Start power transition of a PCI device
576 * @dev: PCI device to handle.
577 * @state: State to put the device into.
578 */
579static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
580{
581 if (state == PCI_D0)
582 pci_platform_power_transition(dev, PCI_D0);
583}
584
585/**
586 * __pci_complete_power_transition - Complete power transition of a PCI device
587 * @dev: PCI device to handle.
588 * @state: State to put the device into.
589 *
590 * This function should not be called directly by device drivers.
591 */
592int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
593{
594 return state > PCI_D0 ?
595 pci_platform_power_transition(dev, state) : -EINVAL;
596}
597EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
598
599/**
549 * pci_set_power_state - Set the power state of a PCI device 600 * pci_set_power_state - Set the power state of a PCI device
550 * @dev: PCI device to handle. 601 * @dev: PCI device to handle.
551 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 602 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
552 * 603 *
553 * Transition a device to a new power state, using the platform formware and/or 604 * Transition a device to a new power state, using the platform firmware and/or
554 * the device's PCI PM registers. 605 * the device's PCI PM registers.
555 * 606 *
556 * RETURN VALUE: 607 * RETURN VALUE:
@@ -577,30 +628,21 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
577 */ 628 */
578 return 0; 629 return 0;
579 630
580 if (state == PCI_D0 && platform_pci_power_manageable(dev)) { 631 /* Check if we're already there */
581 /* 632 if (dev->current_state == state)
582 * Allow the platform to change the state, for example via ACPI 633 return 0;
583 * _PR0, _PS0 and some such, but do not trust it. 634
584 */ 635 __pci_start_power_transition(dev, state);
585 int ret = platform_pci_set_power_state(dev, PCI_D0); 636
586 if (!ret)
587 pci_update_current_state(dev, PCI_D0);
588 }
589 /* This device is quirked not to be put into D3, so 637 /* This device is quirked not to be put into D3, so
590 don't put it in D3 */ 638 don't put it in D3 */
591 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 639 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
592 return 0; 640 return 0;
593 641
594 error = pci_raw_set_power_state(dev, state, true); 642 error = pci_raw_set_power_state(dev, state);
595 643
596 if (state > PCI_D0 && platform_pci_power_manageable(dev)) { 644 if (!__pci_complete_power_transition(dev, state))
597 /* Allow the platform to finalize the transition */ 645 error = 0;
598 int ret = platform_pci_set_power_state(dev, state);
599 if (!ret) {
600 pci_update_current_state(dev, state);
601 error = 0;
602 }
603 }
604 646
605 return error; 647 return error;
606} 648}
@@ -645,11 +687,36 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
645 687
646EXPORT_SYMBOL(pci_choose_state); 688EXPORT_SYMBOL(pci_choose_state);
647 689
690#define PCI_EXP_SAVE_REGS 7
691
692#define pcie_cap_has_devctl(type, flags) 1
693#define pcie_cap_has_lnkctl(type, flags) \
694 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
695 (type == PCI_EXP_TYPE_ROOT_PORT || \
696 type == PCI_EXP_TYPE_ENDPOINT || \
697 type == PCI_EXP_TYPE_LEG_END))
698#define pcie_cap_has_sltctl(type, flags) \
699 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
700 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
701 (type == PCI_EXP_TYPE_DOWNSTREAM && \
702 (flags & PCI_EXP_FLAGS_SLOT))))
703#define pcie_cap_has_rtctl(type, flags) \
704 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
705 (type == PCI_EXP_TYPE_ROOT_PORT || \
706 type == PCI_EXP_TYPE_RC_EC))
707#define pcie_cap_has_devctl2(type, flags) \
708 ((flags & PCI_EXP_FLAGS_VERS) > 1)
709#define pcie_cap_has_lnkctl2(type, flags) \
710 ((flags & PCI_EXP_FLAGS_VERS) > 1)
711#define pcie_cap_has_sltctl2(type, flags) \
712 ((flags & PCI_EXP_FLAGS_VERS) > 1)
713
648static int pci_save_pcie_state(struct pci_dev *dev) 714static int pci_save_pcie_state(struct pci_dev *dev)
649{ 715{
650 int pos, i = 0; 716 int pos, i = 0;
651 struct pci_cap_saved_state *save_state; 717 struct pci_cap_saved_state *save_state;
652 u16 *cap; 718 u16 *cap;
719 u16 flags;
653 720
654 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 721 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
655 if (pos <= 0) 722 if (pos <= 0)
@@ -657,15 +724,27 @@ static int pci_save_pcie_state(struct pci_dev *dev)
657 724
658 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 725 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
659 if (!save_state) { 726 if (!save_state) {
660 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 727 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
661 return -ENOMEM; 728 return -ENOMEM;
662 } 729 }
663 cap = (u16 *)&save_state->data[0]; 730 cap = (u16 *)&save_state->data[0];
664 731
665 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]); 732 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
666 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 733
667 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 734 if (pcie_cap_has_devctl(dev->pcie_type, flags))
668 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 735 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
736 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
737 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
738 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
739 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
740 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
741 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
742 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
743 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
744 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
745 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
746 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
747 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
669 748
670 return 0; 749 return 0;
671} 750}
@@ -675,6 +754,7 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
675 int i = 0, pos; 754 int i = 0, pos;
676 struct pci_cap_saved_state *save_state; 755 struct pci_cap_saved_state *save_state;
677 u16 *cap; 756 u16 *cap;
757 u16 flags;
678 758
679 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 759 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
680 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 760 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
@@ -682,10 +762,22 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
682 return; 762 return;
683 cap = (u16 *)&save_state->data[0]; 763 cap = (u16 *)&save_state->data[0];
684 764
685 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]); 765 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
686 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); 766
687 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); 767 if (pcie_cap_has_devctl(dev->pcie_type, flags))
688 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); 768 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
769 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
770 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
771 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
772 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
773 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
774 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
775 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
776 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
777 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
778 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
779 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
780 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
689} 781}
690 782
691 783
@@ -700,7 +792,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
700 792
701 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 793 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
702 if (!save_state) { 794 if (!save_state) {
703 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 795 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
704 return -ENOMEM; 796 return -ENOMEM;
705 } 797 }
706 798
@@ -773,6 +865,7 @@ pci_restore_state(struct pci_dev *dev)
773 } 865 }
774 pci_restore_pcix_state(dev); 866 pci_restore_pcix_state(dev);
775 pci_restore_msi_state(dev); 867 pci_restore_msi_state(dev);
868 pci_restore_iov_state(dev);
776 869
777 return 0; 870 return 0;
778} 871}
@@ -801,7 +894,7 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
801 */ 894 */
802int pci_reenable_device(struct pci_dev *dev) 895int pci_reenable_device(struct pci_dev *dev)
803{ 896{
804 if (atomic_read(&dev->enable_cnt)) 897 if (pci_is_enabled(dev))
805 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); 898 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
806 return 0; 899 return 0;
807} 900}
@@ -999,7 +1092,7 @@ static void do_pci_disable_device(struct pci_dev *dev)
999 */ 1092 */
1000void pci_disable_enabled_device(struct pci_dev *dev) 1093void pci_disable_enabled_device(struct pci_dev *dev)
1001{ 1094{
1002 if (atomic_read(&dev->enable_cnt)) 1095 if (pci_is_enabled(dev))
1003 do_pci_disable_device(dev); 1096 do_pci_disable_device(dev);
1004} 1097}
1005 1098
@@ -1117,7 +1210,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1117 * Error code depending on the platform is returned if both the platform and 1210 * Error code depending on the platform is returned if both the platform and
1118 * the native mechanism fail to enable the generation of wake-up events 1211 * the native mechanism fail to enable the generation of wake-up events
1119 */ 1212 */
1120int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 1213int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1121{ 1214{
1122 int error = 0; 1215 int error = 0;
1123 bool pme_done = false; 1216 bool pme_done = false;
@@ -1196,15 +1289,14 @@ pci_power_t pci_target_state(struct pci_dev *dev)
1196 default: 1289 default:
1197 target_state = state; 1290 target_state = state;
1198 } 1291 }
1292 } else if (!dev->pm_cap) {
1293 target_state = PCI_D0;
1199 } else if (device_may_wakeup(&dev->dev)) { 1294 } else if (device_may_wakeup(&dev->dev)) {
1200 /* 1295 /*
1201 * Find the deepest state from which the device can generate 1296 * Find the deepest state from which the device can generate
1202 * wake-up events, make it the target state and enable device 1297 * wake-up events, make it the target state and enable device
1203 * to generate PME#. 1298 * to generate PME#.
1204 */ 1299 */
1205 if (!dev->pm_cap)
1206 return PCI_POWER_ERROR;
1207
1208 if (dev->pme_support) { 1300 if (dev->pme_support) {
1209 while (target_state 1301 while (target_state
1210 && !(dev->pme_support & (1 << target_state))) 1302 && !(dev->pme_support & (1 << target_state)))
@@ -1231,7 +1323,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1231 if (target_state == PCI_POWER_ERROR) 1323 if (target_state == PCI_POWER_ERROR)
1232 return -EIO; 1324 return -EIO;
1233 1325
1234 pci_enable_wake(dev, target_state, true); 1326 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1235 1327
1236 error = pci_set_power_state(dev, target_state); 1328 error = pci_set_power_state(dev, target_state);
1237 1329
@@ -1369,7 +1461,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1369{ 1461{
1370 int error; 1462 int error;
1371 1463
1372 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); 1464 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1465 PCI_EXP_SAVE_REGS * sizeof(u16));
1373 if (error) 1466 if (error)
1374 dev_err(&dev->dev, 1467 dev_err(&dev->dev,
1375 "unable to preallocate PCI Express save buffer\n"); 1468 "unable to preallocate PCI Express save buffer\n");
@@ -1381,50 +1474,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1381} 1474}
1382 1475
1383/** 1476/**
1384 * pci_restore_standard_config - restore standard config registers of PCI device
1385 * @dev: PCI device to handle
1386 *
1387 * This function assumes that the device's configuration space is accessible.
1388 * If the device needs to be powered up, the function will wait for it to
1389 * change the state.
1390 */
1391int pci_restore_standard_config(struct pci_dev *dev)
1392{
1393 pci_power_t prev_state;
1394 int error;
1395
1396 pci_update_current_state(dev, PCI_D0);
1397
1398 prev_state = dev->current_state;
1399 if (prev_state == PCI_D0)
1400 goto Restore;
1401
1402 error = pci_raw_set_power_state(dev, PCI_D0, false);
1403 if (error)
1404 return error;
1405
1406 /*
1407 * This assumes that we won't get a bus in B2 or B3 from the BIOS, but
1408 * we've made this assumption forever and it appears to be universally
1409 * satisfied.
1410 */
1411 switch(prev_state) {
1412 case PCI_D3cold:
1413 case PCI_D3hot:
1414 mdelay(pci_pm_d3_delay);
1415 break;
1416 case PCI_D2:
1417 udelay(PCI_PM_D2_DELAY);
1418 break;
1419 }
1420
1421 pci_update_current_state(dev, PCI_D0);
1422
1423 Restore:
1424 return dev->state_saved ? pci_restore_state(dev) : 0;
1425}
1426
1427/**
1428 * pci_enable_ari - enable ARI forwarding if hardware support it 1477 * pci_enable_ari - enable ARI forwarding if hardware support it
1429 * @dev: the PCI device 1478 * @dev: the PCI device
1430 */ 1479 */
@@ -1484,7 +1533,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1484 if (!pin) 1533 if (!pin)
1485 return -1; 1534 return -1;
1486 1535
1487 while (dev->bus->self) { 1536 while (!pci_is_root_bus(dev->bus)) {
1488 pin = pci_swizzle_interrupt_pin(dev, pin); 1537 pin = pci_swizzle_interrupt_pin(dev, pin);
1489 dev = dev->bus->self; 1538 dev = dev->bus->self;
1490 } 1539 }
@@ -1504,7 +1553,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1504{ 1553{
1505 u8 pin = *pinp; 1554 u8 pin = *pinp;
1506 1555
1507 while (dev->bus->self) { 1556 while (!pci_is_root_bus(dev->bus)) {
1508 pin = pci_swizzle_interrupt_pin(dev, pin); 1557 pin = pci_swizzle_interrupt_pin(dev, pin);
1509 dev = dev->bus->self; 1558 dev = dev->bus->self;
1510 } 1559 }
@@ -2010,99 +2059,177 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2010EXPORT_SYMBOL(pci_set_dma_seg_boundary); 2059EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2011#endif 2060#endif
2012 2061
2013static int __pcie_flr(struct pci_dev *dev, int probe) 2062static int pcie_flr(struct pci_dev *dev, int probe)
2014{ 2063{
2015 u16 status; 2064 int i;
2065 int pos;
2016 u32 cap; 2066 u32 cap;
2017 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); 2067 u16 status;
2018 2068
2019 if (!exppos) 2069 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
2070 if (!pos)
2020 return -ENOTTY; 2071 return -ENOTTY;
2021 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); 2072
2073 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2022 if (!(cap & PCI_EXP_DEVCAP_FLR)) 2074 if (!(cap & PCI_EXP_DEVCAP_FLR))
2023 return -ENOTTY; 2075 return -ENOTTY;
2024 2076
2025 if (probe) 2077 if (probe)
2026 return 0; 2078 return 0;
2027 2079
2028 pci_block_user_cfg_access(dev);
2029
2030 /* Wait for Transaction Pending bit clean */ 2080 /* Wait for Transaction Pending bit clean */
2031 msleep(100); 2081 for (i = 0; i < 4; i++) {
2032 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2082 if (i)
2033 if (status & PCI_EXP_DEVSTA_TRPND) { 2083 msleep((1 << (i - 1)) * 100);
2034 dev_info(&dev->dev, "Busy after 100ms while trying to reset; " 2084
2035 "sleeping for 1 second\n"); 2085 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2036 ssleep(1); 2086 if (!(status & PCI_EXP_DEVSTA_TRPND))
2037 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2087 goto clear;
2038 if (status & PCI_EXP_DEVSTA_TRPND)
2039 dev_info(&dev->dev, "Still busy after 1s; "
2040 "proceeding with reset anyway\n");
2041 } 2088 }
2042 2089
2043 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL, 2090 dev_err(&dev->dev, "transaction is not cleared; "
2091 "proceeding with reset anyway\n");
2092
2093clear:
2094 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
2044 PCI_EXP_DEVCTL_BCR_FLR); 2095 PCI_EXP_DEVCTL_BCR_FLR);
2045 mdelay(100); 2096 msleep(100);
2046 2097
2047 pci_unblock_user_cfg_access(dev);
2048 return 0; 2098 return 0;
2049} 2099}
2050 2100
2051static int __pci_af_flr(struct pci_dev *dev, int probe) 2101static int pci_af_flr(struct pci_dev *dev, int probe)
2052{ 2102{
2053 int cappos = pci_find_capability(dev, PCI_CAP_ID_AF); 2103 int i;
2054 u8 status; 2104 int pos;
2055 u8 cap; 2105 u8 cap;
2106 u8 status;
2056 2107
2057 if (!cappos) 2108 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2109 if (!pos)
2058 return -ENOTTY; 2110 return -ENOTTY;
2059 pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap); 2111
2112 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2060 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 2113 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2061 return -ENOTTY; 2114 return -ENOTTY;
2062 2115
2063 if (probe) 2116 if (probe)
2064 return 0; 2117 return 0;
2065 2118
2066 pci_block_user_cfg_access(dev);
2067
2068 /* Wait for Transaction Pending bit clean */ 2119 /* Wait for Transaction Pending bit clean */
2069 msleep(100); 2120 for (i = 0; i < 4; i++) {
2070 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2121 if (i)
2071 if (status & PCI_AF_STATUS_TP) { 2122 msleep((1 << (i - 1)) * 100);
2072 dev_info(&dev->dev, "Busy after 100ms while trying to" 2123
2073 " reset; sleeping for 1 second\n"); 2124 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2074 ssleep(1); 2125 if (!(status & PCI_AF_STATUS_TP))
2075 pci_read_config_byte(dev, 2126 goto clear;
2076 cappos + PCI_AF_STATUS, &status);
2077 if (status & PCI_AF_STATUS_TP)
2078 dev_info(&dev->dev, "Still busy after 1s; "
2079 "proceeding with reset anyway\n");
2080 } 2127 }
2081 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2082 mdelay(100);
2083 2128
2084 pci_unblock_user_cfg_access(dev); 2129 dev_err(&dev->dev, "transaction is not cleared; "
2130 "proceeding with reset anyway\n");
2131
2132clear:
2133 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2134 msleep(100);
2135
2085 return 0; 2136 return 0;
2086} 2137}
2087 2138
2088static int __pci_reset_function(struct pci_dev *pdev, int probe) 2139static int pci_pm_reset(struct pci_dev *dev, int probe)
2089{ 2140{
2090 int res; 2141 u16 csr;
2142
2143 if (!dev->pm_cap)
2144 return -ENOTTY;
2145
2146 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2147 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2148 return -ENOTTY;
2149
2150 if (probe)
2151 return 0;
2091 2152
2092 res = __pcie_flr(pdev, probe); 2153 if (dev->current_state != PCI_D0)
2093 if (res != -ENOTTY) 2154 return -EINVAL;
2094 return res; 2155
2156 csr &= ~PCI_PM_CTRL_STATE_MASK;
2157 csr |= PCI_D3hot;
2158 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2159 msleep(pci_pm_d3_delay);
2095 2160
2096 res = __pci_af_flr(pdev, probe); 2161 csr &= ~PCI_PM_CTRL_STATE_MASK;
2097 if (res != -ENOTTY) 2162 csr |= PCI_D0;
2098 return res; 2163 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2164 msleep(pci_pm_d3_delay);
2099 2165
2100 return res; 2166 return 0;
2167}
2168
2169static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2170{
2171 u16 ctrl;
2172 struct pci_dev *pdev;
2173
2174 if (dev->subordinate)
2175 return -ENOTTY;
2176
2177 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2178 if (pdev != dev)
2179 return -ENOTTY;
2180
2181 if (probe)
2182 return 0;
2183
2184 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2185 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2186 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2187 msleep(100);
2188
2189 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2190 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2191 msleep(100);
2192
2193 return 0;
2194}
2195
2196static int pci_dev_reset(struct pci_dev *dev, int probe)
2197{
2198 int rc;
2199
2200 might_sleep();
2201
2202 if (!probe) {
2203 pci_block_user_cfg_access(dev);
2204 /* block PM suspend, driver probe, etc. */
2205 down(&dev->dev.sem);
2206 }
2207
2208 rc = pcie_flr(dev, probe);
2209 if (rc != -ENOTTY)
2210 goto done;
2211
2212 rc = pci_af_flr(dev, probe);
2213 if (rc != -ENOTTY)
2214 goto done;
2215
2216 rc = pci_pm_reset(dev, probe);
2217 if (rc != -ENOTTY)
2218 goto done;
2219
2220 rc = pci_parent_bus_reset(dev, probe);
2221done:
2222 if (!probe) {
2223 up(&dev->dev.sem);
2224 pci_unblock_user_cfg_access(dev);
2225 }
2226
2227 return rc;
2101} 2228}
2102 2229
2103/** 2230/**
2104 * pci_execute_reset_function() - Reset a PCI device function 2231 * __pci_reset_function - reset a PCI device function
2105 * @dev: Device function to reset 2232 * @dev: PCI device to reset
2106 * 2233 *
2107 * Some devices allow an individual function to be reset without affecting 2234 * Some devices allow an individual function to be reset without affecting
2108 * other functions in the same device. The PCI device must be responsive 2235 * other functions in the same device. The PCI device must be responsive
@@ -2114,18 +2241,18 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
2114 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 2241 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2115 * etc. 2242 * etc.
2116 * 2243 *
2117 * Returns 0 if the device function was successfully reset or -ENOTTY if the 2244 * Returns 0 if the device function was successfully reset or negative if the
2118 * device doesn't support resetting a single function. 2245 * device doesn't support resetting a single function.
2119 */ 2246 */
2120int pci_execute_reset_function(struct pci_dev *dev) 2247int __pci_reset_function(struct pci_dev *dev)
2121{ 2248{
2122 return __pci_reset_function(dev, 0); 2249 return pci_dev_reset(dev, 0);
2123} 2250}
2124EXPORT_SYMBOL_GPL(pci_execute_reset_function); 2251EXPORT_SYMBOL_GPL(__pci_reset_function);
2125 2252
2126/** 2253/**
2127 * pci_reset_function() - quiesce and reset a PCI device function 2254 * pci_reset_function - quiesce and reset a PCI device function
2128 * @dev: Device function to reset 2255 * @dev: PCI device to reset
2129 * 2256 *
2130 * Some devices allow an individual function to be reset without affecting 2257 * Some devices allow an individual function to be reset without affecting
2131 * other functions in the same device. The PCI device must be responsive 2258 * other functions in the same device. The PCI device must be responsive
@@ -2133,32 +2260,33 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
2133 * 2260 *
2134 * This function does not just reset the PCI portion of a device, but 2261 * This function does not just reset the PCI portion of a device, but
2135 * clears all the state associated with the device. This function differs 2262 * clears all the state associated with the device. This function differs
2136 * from pci_execute_reset_function in that it saves and restores device state 2263 * from __pci_reset_function in that it saves and restores device state
2137 * over the reset. 2264 * over the reset.
2138 * 2265 *
2139 * Returns 0 if the device function was successfully reset or -ENOTTY if the 2266 * Returns 0 if the device function was successfully reset or negative if the
2140 * device doesn't support resetting a single function. 2267 * device doesn't support resetting a single function.
2141 */ 2268 */
2142int pci_reset_function(struct pci_dev *dev) 2269int pci_reset_function(struct pci_dev *dev)
2143{ 2270{
2144 int r = __pci_reset_function(dev, 1); 2271 int rc;
2145 2272
2146 if (r < 0) 2273 rc = pci_dev_reset(dev, 1);
2147 return r; 2274 if (rc)
2275 return rc;
2148 2276
2149 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
2150 disable_irq(dev->irq);
2151 pci_save_state(dev); 2277 pci_save_state(dev);
2152 2278
2279 /*
2280 * both INTx and MSI are disabled after the Interrupt Disable bit
2281 * is set and the Bus Master bit is cleared.
2282 */
2153 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 2283 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2154 2284
2155 r = pci_execute_reset_function(dev); 2285 rc = pci_dev_reset(dev, 0);
2156 2286
2157 pci_restore_state(dev); 2287 pci_restore_state(dev);
2158 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
2159 enable_irq(dev->irq);
2160 2288
2161 return r; 2289 return rc;
2162} 2290}
2163EXPORT_SYMBOL_GPL(pci_reset_function); 2291EXPORT_SYMBOL_GPL(pci_reset_function);
2164 2292
@@ -2346,18 +2474,140 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2346 */ 2474 */
2347int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 2475int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2348{ 2476{
2477 int reg;
2478
2349 if (resno < PCI_ROM_RESOURCE) { 2479 if (resno < PCI_ROM_RESOURCE) {
2350 *type = pci_bar_unknown; 2480 *type = pci_bar_unknown;
2351 return PCI_BASE_ADDRESS_0 + 4 * resno; 2481 return PCI_BASE_ADDRESS_0 + 4 * resno;
2352 } else if (resno == PCI_ROM_RESOURCE) { 2482 } else if (resno == PCI_ROM_RESOURCE) {
2353 *type = pci_bar_mem32; 2483 *type = pci_bar_mem32;
2354 return dev->rom_base_reg; 2484 return dev->rom_base_reg;
2485 } else if (resno < PCI_BRIDGE_RESOURCES) {
2486 /* device specific resource */
2487 reg = pci_iov_resource_bar(dev, resno, type);
2488 if (reg)
2489 return reg;
2355 } 2490 }
2356 2491
2357 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2492 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
2358 return 0; 2493 return 0;
2359} 2494}
2360 2495
2496#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2497static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2498spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
2499
2500/**
2501 * pci_specified_resource_alignment - get resource alignment specified by user.
2502 * @dev: the PCI device to get
2503 *
2504 * RETURNS: Resource alignment if it is specified.
2505 * Zero if it is not specified.
2506 */
2507resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2508{
2509 int seg, bus, slot, func, align_order, count;
2510 resource_size_t align = 0;
2511 char *p;
2512
2513 spin_lock(&resource_alignment_lock);
2514 p = resource_alignment_param;
2515 while (*p) {
2516 count = 0;
2517 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2518 p[count] == '@') {
2519 p += count + 1;
2520 } else {
2521 align_order = -1;
2522 }
2523 if (sscanf(p, "%x:%x:%x.%x%n",
2524 &seg, &bus, &slot, &func, &count) != 4) {
2525 seg = 0;
2526 if (sscanf(p, "%x:%x.%x%n",
2527 &bus, &slot, &func, &count) != 3) {
2528 /* Invalid format */
2529 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2530 p);
2531 break;
2532 }
2533 }
2534 p += count;
2535 if (seg == pci_domain_nr(dev->bus) &&
2536 bus == dev->bus->number &&
2537 slot == PCI_SLOT(dev->devfn) &&
2538 func == PCI_FUNC(dev->devfn)) {
2539 if (align_order == -1) {
2540 align = PAGE_SIZE;
2541 } else {
2542 align = 1 << align_order;
2543 }
2544 /* Found */
2545 break;
2546 }
2547 if (*p != ';' && *p != ',') {
2548 /* End of param or invalid format */
2549 break;
2550 }
2551 p++;
2552 }
2553 spin_unlock(&resource_alignment_lock);
2554 return align;
2555}
2556
2557/**
2558 * pci_is_reassigndev - check if specified PCI is target device to reassign
2559 * @dev: the PCI device to check
2560 *
2561 * RETURNS: non-zero for PCI device is a target device to reassign,
2562 * or zero is not.
2563 */
2564int pci_is_reassigndev(struct pci_dev *dev)
2565{
2566 return (pci_specified_resource_alignment(dev) != 0);
2567}
2568
2569ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2570{
2571 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
2572 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
2573 spin_lock(&resource_alignment_lock);
2574 strncpy(resource_alignment_param, buf, count);
2575 resource_alignment_param[count] = '\0';
2576 spin_unlock(&resource_alignment_lock);
2577 return count;
2578}
2579
2580ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
2581{
2582 size_t count;
2583 spin_lock(&resource_alignment_lock);
2584 count = snprintf(buf, size, "%s", resource_alignment_param);
2585 spin_unlock(&resource_alignment_lock);
2586 return count;
2587}
2588
2589static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
2590{
2591 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
2592}
2593
2594static ssize_t pci_resource_alignment_store(struct bus_type *bus,
2595 const char *buf, size_t count)
2596{
2597 return pci_set_resource_alignment_param(buf, count);
2598}
2599
2600BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
2601 pci_resource_alignment_store);
2602
2603static int __init pci_resource_alignment_sysfs_init(void)
2604{
2605 return bus_create_file(&pci_bus_type,
2606 &bus_attr_resource_alignment);
2607}
2608
2609late_initcall(pci_resource_alignment_sysfs_init);
2610
2361static void __devinit pci_no_domains(void) 2611static void __devinit pci_no_domains(void)
2362{ 2612{
2363#ifdef CONFIG_PCI_DOMAINS 2613#ifdef CONFIG_PCI_DOMAINS
@@ -2406,6 +2656,11 @@ static int __init pci_setup(char *str)
2406 pci_cardbus_io_size = memparse(str + 9, &str); 2656 pci_cardbus_io_size = memparse(str + 9, &str);
2407 } else if (!strncmp(str, "cbmemsize=", 10)) { 2657 } else if (!strncmp(str, "cbmemsize=", 10)) {
2408 pci_cardbus_mem_size = memparse(str + 10, &str); 2658 pci_cardbus_mem_size = memparse(str + 10, &str);
2659 } else if (!strncmp(str, "resource_alignment=", 19)) {
2660 pci_set_resource_alignment_param(str + 19,
2661 strlen(str + 19));
2662 } else if (!strncmp(str, "ecrc=", 5)) {
2663 pcie_ecrc_get_policy(str + 5);
2409 } else { 2664 } else {
2410 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2665 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2411 str); 2666 str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 07c0aa5275e6..d03f6b99f292 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,8 @@
1#ifndef DRIVERS_PCI_H 1#ifndef DRIVERS_PCI_H
2#define DRIVERS_PCI_H 2#define DRIVERS_PCI_H
3 3
4#include <linux/workqueue.h>
5
4#define PCI_CFG_SPACE_SIZE 256 6#define PCI_CFG_SPACE_SIZE 256
5#define PCI_CFG_SPACE_EXP_SIZE 4096 7#define PCI_CFG_SPACE_EXP_SIZE 4096
6 8
@@ -49,7 +51,6 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
49extern void pci_pm_init(struct pci_dev *dev); 51extern void pci_pm_init(struct pci_dev *dev);
50extern void platform_pci_wakeup_init(struct pci_dev *dev); 52extern void platform_pci_wakeup_init(struct pci_dev *dev);
51extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 53extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
52extern int pci_restore_standard_config(struct pci_dev *dev);
53 54
54static inline bool pci_is_bridge(struct pci_dev *pci_dev) 55static inline bool pci_is_bridge(struct pci_dev *pci_dev)
55{ 56{
@@ -136,6 +137,12 @@ extern int pcie_mch_quirk;
136extern struct device_attribute pci_dev_attrs[]; 137extern struct device_attribute pci_dev_attrs[];
137extern struct device_attribute dev_attr_cpuaffinity; 138extern struct device_attribute dev_attr_cpuaffinity;
138extern struct device_attribute dev_attr_cpulistaffinity; 139extern struct device_attribute dev_attr_cpulistaffinity;
140#ifdef CONFIG_HOTPLUG
141extern struct bus_attribute pci_bus_attrs[];
142#else
143#define pci_bus_attrs NULL
144#endif
145
139 146
140/** 147/**
141 * pci_match_one_device - Tell if a PCI device structure has a matching 148 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -178,6 +185,7 @@ enum pci_bar_type {
178 pci_bar_mem64, /* A 64-bit memory BAR */ 185 pci_bar_mem64, /* A 64-bit memory BAR */
179}; 186};
180 187
188extern int pci_setup_device(struct pci_dev *dev);
181extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 189extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
182 struct resource *res, unsigned int reg); 190 struct resource *res, unsigned int reg);
183extern int pci_resource_bar(struct pci_dev *dev, int resno, 191extern int pci_resource_bar(struct pci_dev *dev, int resno,
@@ -195,4 +203,60 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
195 return bus->self && bus->self->ari_enabled; 203 return bus->self && bus->self->ari_enabled;
196} 204}
197 205
206#ifdef CONFIG_PCI_QUIRKS
207extern int pci_is_reassigndev(struct pci_dev *dev);
208resource_size_t pci_specified_resource_alignment(struct pci_dev *dev);
209extern void pci_disable_bridge_window(struct pci_dev *dev);
210#endif
211
212/* Single Root I/O Virtualization */
213struct pci_sriov {
214 int pos; /* capability position */
215 int nres; /* number of resources */
216 u32 cap; /* SR-IOV Capabilities */
217 u16 ctrl; /* SR-IOV Control */
218 u16 total; /* total VFs associated with the PF */
219 u16 initial; /* initial VFs associated with the PF */
220 u16 nr_virtfn; /* number of VFs available */
221 u16 offset; /* first VF Routing ID offset */
222 u16 stride; /* following VF stride */
223 u32 pgsz; /* page size for BAR alignment */
224 u8 link; /* Function Dependency Link */
225 struct pci_dev *dev; /* lowest numbered PF */
226 struct pci_dev *self; /* this PF */
227 struct mutex lock; /* lock for VF bus */
228 struct work_struct mtask; /* VF Migration task */
229 u8 __iomem *mstate; /* VF Migration State Array */
230};
231
232#ifdef CONFIG_PCI_IOV
233extern int pci_iov_init(struct pci_dev *dev);
234extern void pci_iov_release(struct pci_dev *dev);
235extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
236 enum pci_bar_type *type);
237extern void pci_restore_iov_state(struct pci_dev *dev);
238extern int pci_iov_bus_range(struct pci_bus *bus);
239#else
240static inline int pci_iov_init(struct pci_dev *dev)
241{
242 return -ENODEV;
243}
244static inline void pci_iov_release(struct pci_dev *dev)
245
246{
247}
248static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno,
249 enum pci_bar_type *type)
250{
251 return 0;
252}
253static inline void pci_restore_iov_state(struct pci_dev *dev)
254{
255}
256static inline int pci_iov_bus_range(struct pci_bus *bus)
257{
258 return 0;
259}
260#endif /* CONFIG_PCI_IOV */
261
198#endif /* DRIVERS_PCI_H */ 262#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa13..50e94e02378a 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER
10 This enables PCI Express Root Port Advanced Error Reporting 10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) driver support. Error reporting messages sent to Root 11 (AER) driver support. Error reporting messages sent to Root
12 Port will be handled by PCI Express AER driver. 12 Port will be handled by PCI Express AER driver.
13
14
15#
16# PCI Express ECRC
17#
18config PCIE_ECRC
19 bool "PCI Express ECRC settings control"
20 depends on PCIEAER
21 help
22 Used to override firmware/bios settings for PCI Express ECRC
23 (transaction layer end-to-end CRC checking).
24
25 When in doubt, say N.
26
27source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000000..b8c925c1f6aa
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
1#
2# PCI Express Root Port Device AER Debug Configuration
3#
4
5config PCIEAER_INJECT
6 tristate "PCIE AER error injector support"
7 depends on PCIEAER
8 default n
9 help
10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) software error injector.
12
13 Debuging PCIE AER code is quite difficult because it is hard
14 to trigger various real hardware errors. Software based
15 error injection can fake almost all kinds of errors with the
16 help of a user space helper tool aer-inject, which can be
17 gotten from:
18 http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a8..2cba67510dc8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
4 4
5obj-$(CONFIG_PCIEAER) += aerdriver.o 5obj-$(CONFIG_PCIEAER) += aerdriver.o
6 6
7obj-$(CONFIG_PCIE_ECRC) += ecrc.o
8
7aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o 9aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
8aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o 10aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
9 11
12obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000000..d92ae21a59d8
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
1/*
2 * PCIE AER software error injection support.
3 *
4 * Debuging PCIE AER code is quite difficult because it is hard to
5 * trigger various real hardware errors. Software based error
6 * injection can fake almost all kinds of errors with the help of a
7 * user space helper tool aer-inject, which can be gotten from:
8 * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
9 *
10 * Copyright 2009 Intel Corporation.
11 * Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; version 2
16 * of the License.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/miscdevice.h>
23#include <linux/pci.h>
24#include <linux/fs.h>
25#include <asm/uaccess.h>
26#include "aerdrv.h"
27
28struct aer_error_inj
29{
30 u8 bus;
31 u8 dev;
32 u8 fn;
33 u32 uncor_status;
34 u32 cor_status;
35 u32 header_log0;
36 u32 header_log1;
37 u32 header_log2;
38 u32 header_log3;
39};
40
41struct aer_error
42{
43 struct list_head list;
44 unsigned int bus;
45 unsigned int devfn;
46 int pos_cap_err;
47
48 u32 uncor_status;
49 u32 cor_status;
50 u32 header_log0;
51 u32 header_log1;
52 u32 header_log2;
53 u32 header_log3;
54 u32 root_status;
55 u32 source_id;
56};
57
58struct pci_bus_ops
59{
60 struct list_head list;
61 struct pci_bus *bus;
62 struct pci_ops *ops;
63};
64
65static LIST_HEAD(einjected);
66
67static LIST_HEAD(pci_bus_ops_list);
68
69/* Protect einjected and pci_bus_ops_list */
70static DEFINE_SPINLOCK(inject_lock);
71
72static void aer_error_init(struct aer_error *err, unsigned int bus,
73 unsigned int devfn, int pos_cap_err)
74{
75 INIT_LIST_HEAD(&err->list);
76 err->bus = bus;
77 err->devfn = devfn;
78 err->pos_cap_err = pos_cap_err;
79}
80
81/* inject_lock must be held before calling */
82static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
83{
84 struct aer_error *err;
85
86 list_for_each_entry(err, &einjected, list) {
87 if (bus == err->bus && devfn == err->devfn)
88 return err;
89 }
90 return NULL;
91}
92
93/* inject_lock must be held before calling */
94static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
95{
96 return __find_aer_error(dev->bus->number, dev->devfn);
97}
98
99/* inject_lock must be held before calling */
100static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
101{
102 struct pci_bus_ops *bus_ops;
103
104 list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
105 if (bus_ops->bus == bus)
106 return bus_ops->ops;
107 }
108 return NULL;
109}
110
111static struct pci_bus_ops *pci_bus_ops_pop(void)
112{
113 unsigned long flags;
114 struct pci_bus_ops *bus_ops = NULL;
115
116 spin_lock_irqsave(&inject_lock, flags);
117 if (list_empty(&pci_bus_ops_list))
118 bus_ops = NULL;
119 else {
120 struct list_head *lh = pci_bus_ops_list.next;
121 list_del(lh);
122 bus_ops = list_entry(lh, struct pci_bus_ops, list);
123 }
124 spin_unlock_irqrestore(&inject_lock, flags);
125 return bus_ops;
126}
127
128static u32 *find_pci_config_dword(struct aer_error *err, int where,
129 int *prw1cs)
130{
131 int rw1cs = 0;
132 u32 *target = NULL;
133
134 if (err->pos_cap_err == -1)
135 return NULL;
136
137 switch (where - err->pos_cap_err) {
138 case PCI_ERR_UNCOR_STATUS:
139 target = &err->uncor_status;
140 rw1cs = 1;
141 break;
142 case PCI_ERR_COR_STATUS:
143 target = &err->cor_status;
144 rw1cs = 1;
145 break;
146 case PCI_ERR_HEADER_LOG:
147 target = &err->header_log0;
148 break;
149 case PCI_ERR_HEADER_LOG+4:
150 target = &err->header_log1;
151 break;
152 case PCI_ERR_HEADER_LOG+8:
153 target = &err->header_log2;
154 break;
155 case PCI_ERR_HEADER_LOG+12:
156 target = &err->header_log3;
157 break;
158 case PCI_ERR_ROOT_STATUS:
159 target = &err->root_status;
160 rw1cs = 1;
161 break;
162 case PCI_ERR_ROOT_COR_SRC:
163 target = &err->source_id;
164 break;
165 }
166 if (prw1cs)
167 *prw1cs = rw1cs;
168 return target;
169}
170
171static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
172 int size, u32 *val)
173{
174 u32 *sim;
175 struct aer_error *err;
176 unsigned long flags;
177 struct pci_ops *ops;
178
179 spin_lock_irqsave(&inject_lock, flags);
180 if (size != sizeof(u32))
181 goto out;
182 err = __find_aer_error(bus->number, devfn);
183 if (!err)
184 goto out;
185
186 sim = find_pci_config_dword(err, where, NULL);
187 if (sim) {
188 *val = *sim;
189 spin_unlock_irqrestore(&inject_lock, flags);
190 return 0;
191 }
192out:
193 ops = __find_pci_bus_ops(bus);
194 spin_unlock_irqrestore(&inject_lock, flags);
195 return ops->read(bus, devfn, where, size, val);
196}
197
198int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
199 u32 val)
200{
201 u32 *sim;
202 struct aer_error *err;
203 unsigned long flags;
204 int rw1cs;
205 struct pci_ops *ops;
206
207 spin_lock_irqsave(&inject_lock, flags);
208 if (size != sizeof(u32))
209 goto out;
210 err = __find_aer_error(bus->number, devfn);
211 if (!err)
212 goto out;
213
214 sim = find_pci_config_dword(err, where, &rw1cs);
215 if (sim) {
216 if (rw1cs)
217 *sim ^= val;
218 else
219 *sim = val;
220 spin_unlock_irqrestore(&inject_lock, flags);
221 return 0;
222 }
223out:
224 ops = __find_pci_bus_ops(bus);
225 spin_unlock_irqrestore(&inject_lock, flags);
226 return ops->write(bus, devfn, where, size, val);
227}
228
/* Replacement config ops that overlay the simulated AER registers */
static struct pci_ops pci_ops_aer = {
	.read = pci_read_aer,
	.write = pci_write_aer,
};
233
234static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
235 struct pci_bus *bus,
236 struct pci_ops *ops)
237{
238 INIT_LIST_HEAD(&bus_ops->list);
239 bus_ops->bus = bus;
240 bus_ops->ops = ops;
241}
242
243static int pci_bus_set_aer_ops(struct pci_bus *bus)
244{
245 struct pci_ops *ops;
246 struct pci_bus_ops *bus_ops;
247 unsigned long flags;
248
249 bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
250 if (!bus_ops)
251 return -ENOMEM;
252 ops = pci_bus_set_ops(bus, &pci_ops_aer);
253 spin_lock_irqsave(&inject_lock, flags);
254 if (ops == &pci_ops_aer)
255 goto out;
256 pci_bus_ops_init(bus_ops, bus, ops);
257 list_add(&bus_ops->list, &pci_bus_ops_list);
258 bus_ops = NULL;
259out:
260 spin_unlock_irqrestore(&inject_lock, flags);
261 if (bus_ops)
262 kfree(bus_ops);
263 return 0;
264}
265
266static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
267{
268 while (1) {
269 if (!dev->is_pcie)
270 break;
271 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
272 return dev;
273 if (!dev->bus->self)
274 break;
275 dev = dev->bus->self;
276 }
277 return NULL;
278}
279
280static int find_aer_device_iter(struct device *device, void *data)
281{
282 struct pcie_device **result = data;
283 struct pcie_device *pcie_dev;
284
285 if (device->bus == &pcie_port_bus_type) {
286 pcie_dev = to_pcie_device(device);
287 if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
288 *result = pcie_dev;
289 return 1;
290 }
291 }
292 return 0;
293}
294
/*
 * Find the pcie_device child of @dev that carries the AER port service.
 * On success *result points to it and a non-zero value is returned;
 * returns 0 when no AER service device exists under @dev.
 */
static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
{
	return device_for_each_child(&dev->dev, result, find_aer_device_iter);
}
299
300static int aer_inject(struct aer_error_inj *einj)
301{
302 struct aer_error *err, *rperr;
303 struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
304 struct pci_dev *dev, *rpdev;
305 struct pcie_device *edev;
306 unsigned long flags;
307 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
308 int pos_cap_err, rp_pos_cap_err;
309 u32 sever;
310 int ret = 0;
311
312 dev = pci_get_bus_and_slot(einj->bus, devfn);
313 if (!dev)
314 return -EINVAL;
315 rpdev = pcie_find_root_port(dev);
316 if (!rpdev) {
317 ret = -EINVAL;
318 goto out_put;
319 }
320
321 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
322 if (!pos_cap_err) {
323 ret = -EIO;
324 goto out_put;
325 }
326 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
327
328 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
329 if (!rp_pos_cap_err) {
330 ret = -EIO;
331 goto out_put;
332 }
333
334 err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
335 if (!err_alloc) {
336 ret = -ENOMEM;
337 goto out_put;
338 }
339 rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
340 if (!rperr_alloc) {
341 ret = -ENOMEM;
342 goto out_put;
343 }
344
345 spin_lock_irqsave(&inject_lock, flags);
346
347 err = __find_aer_error_by_dev(dev);
348 if (!err) {
349 err = err_alloc;
350 err_alloc = NULL;
351 aer_error_init(err, einj->bus, devfn, pos_cap_err);
352 list_add(&err->list, &einjected);
353 }
354 err->uncor_status |= einj->uncor_status;
355 err->cor_status |= einj->cor_status;
356 err->header_log0 = einj->header_log0;
357 err->header_log1 = einj->header_log1;
358 err->header_log2 = einj->header_log2;
359 err->header_log3 = einj->header_log3;
360
361 rperr = __find_aer_error_by_dev(rpdev);
362 if (!rperr) {
363 rperr = rperr_alloc;
364 rperr_alloc = NULL;
365 aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
366 rp_pos_cap_err);
367 list_add(&rperr->list, &einjected);
368 }
369 if (einj->cor_status) {
370 if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
371 rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
372 else
373 rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
374 rperr->source_id &= 0xffff0000;
375 rperr->source_id |= (einj->bus << 8) | devfn;
376 }
377 if (einj->uncor_status) {
378 if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
379 rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
380 if (sever & einj->uncor_status) {
381 rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
382 if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
383 rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
384 } else
385 rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
386 rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
387 rperr->source_id &= 0x0000ffff;
388 rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
389 }
390 spin_unlock_irqrestore(&inject_lock, flags);
391
392 ret = pci_bus_set_aer_ops(dev->bus);
393 if (ret)
394 goto out_put;
395 ret = pci_bus_set_aer_ops(rpdev->bus);
396 if (ret)
397 goto out_put;
398
399 if (find_aer_device(rpdev, &edev))
400 aer_irq(-1, edev);
401 else
402 ret = -EINVAL;
403out_put:
404 if (err_alloc)
405 kfree(err_alloc);
406 if (rperr_alloc)
407 kfree(rperr_alloc);
408 pci_dev_put(dev);
409 return ret;
410}
411
412static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
413 size_t usize, loff_t *off)
414{
415 struct aer_error_inj einj;
416 int ret;
417
418 if (!capable(CAP_SYS_ADMIN))
419 return -EPERM;
420
421 if (usize != sizeof(struct aer_error_inj))
422 return -EINVAL;
423
424 if (copy_from_user(&einj, ubuf, usize))
425 return -EFAULT;
426
427 ret = aer_inject(&einj);
428 return ret ? ret : usize;
429}
430
/* Errors are injected by writing struct aer_error_inj to the device */
static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
};
435
/* /dev/aer_inject with a dynamically assigned misc minor number */
static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
};
441
/* Module init: register the /dev/aer_inject misc device */
static int __init aer_inject_init(void)
{
	return misc_register(&aer_inject_device);
}
446
447static void __exit aer_inject_exit(void)
448{
449 struct aer_error *err, *err_next;
450 unsigned long flags;
451 struct pci_bus_ops *bus_ops;
452
453 misc_deregister(&aer_inject_device);
454
455 while ((bus_ops = pci_bus_ops_pop())) {
456 pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
457 kfree(bus_ops);
458 }
459
460 spin_lock_irqsave(&inject_lock, flags);
461 list_for_each_entry_safe(err, err_next,
462 &pci_bus_ops_list, list) {
463 list_del(&err->list);
464 kfree(err);
465 }
466 spin_unlock_irqrestore(&inject_lock, flags);
467}
468
469module_init(aer_inject_init);
470module_exit(aer_inject_exit);
471
472MODULE_DESCRIPTION("PCIE AER software error injector");
473MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index e390707661dd..4770f13b3ca1 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,30 +38,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
38MODULE_DESCRIPTION(DRIVER_DESC); 38MODULE_DESCRIPTION(DRIVER_DESC);
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static int __devinit aer_probe (struct pcie_device *dev, 41static int __devinit aer_probe (struct pcie_device *dev);
42 const struct pcie_port_service_id *id );
43static void aer_remove(struct pcie_device *dev); 42static void aer_remove(struct pcie_device *dev);
44static int aer_suspend(struct pcie_device *dev, pm_message_t state)
45{return 0;}
46static int aer_resume(struct pcie_device *dev) {return 0;}
47static pci_ers_result_t aer_error_detected(struct pci_dev *dev, 43static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
48 enum pci_channel_state error); 44 enum pci_channel_state error);
49static void aer_error_resume(struct pci_dev *dev); 45static void aer_error_resume(struct pci_dev *dev);
50static pci_ers_result_t aer_root_reset(struct pci_dev *dev); 46static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
51 47
52/*
53 * PCI Express bus's AER Root service driver data structure
54 */
55static struct pcie_port_service_id aer_id[] = {
56 {
57 .vendor = PCI_ANY_ID,
58 .device = PCI_ANY_ID,
59 .port_type = PCIE_RC_PORT,
60 .service_type = PCIE_PORT_SERVICE_AER,
61 },
62 { /* end: all zeroes */ }
63};
64
65static struct pci_error_handlers aer_error_handlers = { 48static struct pci_error_handlers aer_error_handlers = {
66 .error_detected = aer_error_detected, 49 .error_detected = aer_error_detected,
67 .resume = aer_error_resume, 50 .resume = aer_error_resume,
@@ -69,14 +52,12 @@ static struct pci_error_handlers aer_error_handlers = {
69 52
70static struct pcie_port_service_driver aerdriver = { 53static struct pcie_port_service_driver aerdriver = {
71 .name = "aer", 54 .name = "aer",
72 .id_table = &aer_id[0], 55 .port_type = PCIE_ANY_PORT,
56 .service = PCIE_PORT_SERVICE_AER,
73 57
74 .probe = aer_probe, 58 .probe = aer_probe,
75 .remove = aer_remove, 59 .remove = aer_remove,
76 60
77 .suspend = aer_suspend,
78 .resume = aer_resume,
79
80 .err_handler = &aer_error_handlers, 61 .err_handler = &aer_error_handlers,
81 62
82 .reset_link = aer_root_reset, 63 .reset_link = aer_root_reset,
@@ -96,7 +77,7 @@ void pci_no_aer(void)
96 * 77 *
97 * Invoked when Root Port detects AER messages. 78 * Invoked when Root Port detects AER messages.
98 **/ 79 **/
99static irqreturn_t aer_irq(int irq, void *context) 80irqreturn_t aer_irq(int irq, void *context)
100{ 81{
101 unsigned int status, id; 82 unsigned int status, id;
102 struct pcie_device *pdev = (struct pcie_device *)context; 83 struct pcie_device *pdev = (struct pcie_device *)context;
@@ -145,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context)
145 126
146 return IRQ_HANDLED; 127 return IRQ_HANDLED;
147} 128}
129EXPORT_SYMBOL_GPL(aer_irq);
148 130
149/** 131/**
150 * aer_alloc_rpc - allocate Root Port data structure 132 * aer_alloc_rpc - allocate Root Port data structure
@@ -207,8 +189,7 @@ static void aer_remove(struct pcie_device *dev)
207 * 189 *
208 * Invoked when PCI Express bus loads AER service driver. 190 * Invoked when PCI Express bus loads AER service driver.
209 **/ 191 **/
210static int __devinit aer_probe (struct pcie_device *dev, 192static int __devinit aer_probe (struct pcie_device *dev)
211 const struct pcie_port_service_id *id )
212{ 193{
213 int status; 194 int status;
214 struct aer_rpc *rpc; 195 struct aer_rpc *rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index c7ad68b6c6d6..bbd7428ca2d0 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/pcieport_if.h> 12#include <linux/pcieport_if.h>
13#include <linux/aer.h> 13#include <linux/aer.h>
14#include <linux/interrupt.h>
14 15
15#define AER_NONFATAL 0 16#define AER_NONFATAL 0
16#define AER_FATAL 1 17#define AER_FATAL 1
@@ -56,7 +57,11 @@ struct header_log_regs {
56 unsigned int dw3; 57 unsigned int dw3;
57}; 58};
58 59
60#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
59struct aer_err_info { 61struct aer_err_info {
62 struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
63 int error_dev_num;
64 u16 id;
60 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ 65 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
61 int flags; 66 int flags;
62 unsigned int status; /* COR/UNCOR Error Status */ 67 unsigned int status; /* COR/UNCOR Error Status */
@@ -95,6 +100,9 @@ struct aer_broadcast_data {
95static inline pci_ers_result_t merge_result(enum pci_ers_result orig, 100static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
96 enum pci_ers_result new) 101 enum pci_ers_result new)
97{ 102{
103 if (new == PCI_ERS_RESULT_NONE)
104 return orig;
105
98 switch (orig) { 106 switch (orig) {
99 case PCI_ERS_RESULT_CAN_RECOVER: 107 case PCI_ERS_RESULT_CAN_RECOVER:
100 case PCI_ERS_RESULT_RECOVERED: 108 case PCI_ERS_RESULT_RECOVERED:
@@ -117,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
117extern int aer_init(struct pcie_device *dev); 125extern int aer_init(struct pcie_device *dev);
118extern void aer_isr(struct work_struct *work); 126extern void aer_isr(struct work_struct *work);
119extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 127extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
128extern irqreturn_t aer_irq(int irq, void *context);
120 129
121#ifdef CONFIG_ACPI 130#ifdef CONFIG_ACPI
122extern int aer_osc_setup(struct pcie_device *pciedev); 131extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index ebce26c37049..8edb2f300e8f 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -38,7 +38,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
38 38
39 handle = acpi_find_root_bridge_handle(pdev); 39 handle = acpi_find_root_bridge_handle(pdev);
40 if (handle) { 40 if (handle) {
41 status = pci_osc_control_set(handle, 41 status = acpi_pci_osc_control_set(handle,
42 OSC_PCI_EXPRESS_AER_CONTROL | 42 OSC_PCI_EXPRESS_AER_CONTROL |
43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
44 } 44 }
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 382575007382..3d8872704a58 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
26#include "aerdrv.h" 26#include "aerdrv.h"
27 27
28static int forceload; 28static int forceload;
29static int nosourceid;
29module_param(forceload, bool, 0); 30module_param(forceload, bool, 0);
31module_param(nosourceid, bool, 0);
30 32
31int pci_enable_pcie_error_reporting(struct pci_dev *dev) 33int pci_enable_pcie_error_reporting(struct pci_dev *dev)
32{ 34{
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
109#endif /* 0 */ 111#endif /* 0 */
110 112
111 113
112static void set_device_error_reporting(struct pci_dev *dev, void *data) 114static int set_device_error_reporting(struct pci_dev *dev, void *data)
113{ 115{
114 bool enable = *((bool *)data); 116 bool enable = *((bool *)data);
115 117
116 if (dev->pcie_type != PCIE_RC_PORT && 118 if (dev->pcie_type == PCIE_RC_PORT ||
117 dev->pcie_type != PCIE_SW_UPSTREAM_PORT && 119 dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
118 dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT) 120 dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
119 return; 121 if (enable)
122 pci_enable_pcie_error_reporting(dev);
123 else
124 pci_disable_pcie_error_reporting(dev);
125 }
120 126
121 if (enable) 127 if (enable)
122 pci_enable_pcie_error_reporting(dev); 128 pcie_set_ecrc_checking(dev);
123 else 129
124 pci_disable_pcie_error_reporting(dev); 130 return 0;
125} 131}
126 132
127/** 133/**
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
139 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); 145 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
140} 146}
141 147
142static int find_device_iter(struct device *device, void *data) 148static inline int compare_device_id(struct pci_dev *dev,
149 struct aer_err_info *e_info)
143{ 150{
144 struct pci_dev *dev; 151 if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
145 u16 id = *(unsigned long *)data; 152 /*
146 u8 secondary, subordinate, d_bus = id >> 8; 153 * Device ID match
154 */
155 return 1;
156 }
147 157
148 if (device->bus == &pci_bus_type) { 158 return 0;
149 dev = to_pci_dev(device); 159}
150 if (id == ((dev->bus->number << 8) | dev->devfn)) { 160
151 /* 161static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
152 * Device ID match 162{
153 */ 163 if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
154 *(unsigned long*)data = (unsigned long)device; 164 e_info->dev[e_info->error_dev_num] = dev;
155 return 1; 165 e_info->error_dev_num++;
156 } 166 return 1;
167 } else
168 return 0;
169}
170
171
172#define PCI_BUS(x) (((x) >> 8) & 0xff)
173
174static int find_device_iter(struct pci_dev *dev, void *data)
175{
176 int pos;
177 u32 status;
178 u32 mask;
179 u16 reg16;
180 int result;
181 struct aer_err_info *e_info = (struct aer_err_info *)data;
182
183 /*
184 * When bus id is equal to 0, it might be a bad id
185 * reported by root port.
186 */
187 if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
188 result = compare_device_id(dev, e_info);
189 if (result)
190 add_error_device(e_info, dev);
157 191
158 /* 192 /*
159 * If device is P2P, check if it is an upstream? 193 * If there is no multiple error, we stop
194 * or continue based on the id comparing.
160 */ 195 */
161 if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { 196 if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
162 pci_read_config_byte(dev, PCI_SECONDARY_BUS, 197 return result;
163 &secondary); 198
164 pci_read_config_byte(dev, PCI_SUBORDINATE_BUS, 199 /*
165 &subordinate); 200 * If there are multiple errors and id does match,
166 if (d_bus >= secondary && d_bus <= subordinate) { 201 * We need continue to search other devices under
167 *(unsigned long*)data = (unsigned long)device; 202 * the root port. Return 0 means that.
168 return 1; 203 */
169 } 204 if (result)
205 return 0;
206 }
207
208 /*
209 * When either
210 * 1) nosourceid==y;
211 * 2) bus id is equal to 0. Some ports might lose the bus
212 * id of error source id;
213 * 3) There are multiple errors and prior id comparing fails;
214 * We check AER status registers to find the initial reporter.
215 */
216 if (atomic_read(&dev->enable_cnt) == 0)
217 return 0;
218 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
219 if (!pos)
220 return 0;
221 /* Check if AER is enabled */
222 pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
223 if (!(reg16 & (
224 PCI_EXP_DEVCTL_CERE |
225 PCI_EXP_DEVCTL_NFERE |
226 PCI_EXP_DEVCTL_FERE |
227 PCI_EXP_DEVCTL_URRE)))
228 return 0;
229 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
230 if (!pos)
231 return 0;
232
233 status = 0;
234 mask = 0;
235 if (e_info->severity == AER_CORRECTABLE) {
236 pci_read_config_dword(dev,
237 pos + PCI_ERR_COR_STATUS,
238 &status);
239 pci_read_config_dword(dev,
240 pos + PCI_ERR_COR_MASK,
241 &mask);
242 if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
243 add_error_device(e_info, dev);
244 goto added;
245 }
246 } else {
247 pci_read_config_dword(dev,
248 pos + PCI_ERR_UNCOR_STATUS,
249 &status);
250 pci_read_config_dword(dev,
251 pos + PCI_ERR_UNCOR_MASK,
252 &mask);
253 if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
254 add_error_device(e_info, dev);
255 goto added;
170 } 256 }
171 } 257 }
172 258
173 return 0; 259 return 0;
260
261added:
262 if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
263 return 0;
264 else
265 return 1;
174} 266}
175 267
176/** 268/**
177 * find_source_device - search through device hierarchy for source device 269 * find_source_device - search through device hierarchy for source device
178 * @parent: pointer to Root Port pci_dev data structure 270 * @parent: pointer to Root Port pci_dev data structure
179 * @id: device ID of agent who sends an error message to this Root Port 271 * @err_info: including detailed error information such like id
180 * 272 *
181 * Invoked when error is detected at the Root Port. 273 * Invoked when error is detected at the Root Port.
182 */ 274 */
183static struct device* find_source_device(struct pci_dev *parent, u16 id) 275static void find_source_device(struct pci_dev *parent,
276 struct aer_err_info *e_info)
184{ 277{
185 struct pci_dev *dev = parent; 278 struct pci_dev *dev = parent;
186 struct device *device; 279 int result;
187 unsigned long device_addr;
188 int status;
189 280
190 /* Is Root Port an agent that sends error message? */ 281 /* Is Root Port an agent that sends error message? */
191 if (id == ((dev->bus->number << 8) | dev->devfn)) 282 result = find_device_iter(dev, e_info);
192 return &dev->dev; 283 if (result)
193 284 return;
194 do {
195 device_addr = id;
196 if ((status = device_for_each_child(&dev->dev,
197 &device_addr, find_device_iter))) {
198 device = (struct device*)device_addr;
199 dev = to_pci_dev(device);
200 if (id == ((dev->bus->number << 8) | dev->devfn))
201 return device;
202 }
203 }while (status);
204 285
205 return NULL; 286 pci_walk_bus(parent->subordinate, find_device_iter, e_info);
206} 287}
207 288
208static void report_error_detected(struct pci_dev *dev, void *data) 289static int report_error_detected(struct pci_dev *dev, void *data)
209{ 290{
210 pci_ers_result_t vote; 291 pci_ers_result_t vote;
211 struct pci_error_handlers *err_handler; 292 struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data)
230 dev->driver ? 311 dev->driver ?
231 "no AER-aware driver" : "no driver"); 312 "no AER-aware driver" : "no driver");
232 } 313 }
233 return; 314 return 0;
234 } 315 }
235 316
236 err_handler = dev->driver->err_handler; 317 err_handler = dev->driver->err_handler;
237 vote = err_handler->error_detected(dev, result_data->state); 318 vote = err_handler->error_detected(dev, result_data->state);
238 result_data->result = merge_result(result_data->result, vote); 319 result_data->result = merge_result(result_data->result, vote);
239 return; 320 return 0;
240} 321}
241 322
242static void report_mmio_enabled(struct pci_dev *dev, void *data) 323static int report_mmio_enabled(struct pci_dev *dev, void *data)
243{ 324{
244 pci_ers_result_t vote; 325 pci_ers_result_t vote;
245 struct pci_error_handlers *err_handler; 326 struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data)
249 if (!dev->driver || 330 if (!dev->driver ||
250 !dev->driver->err_handler || 331 !dev->driver->err_handler ||
251 !dev->driver->err_handler->mmio_enabled) 332 !dev->driver->err_handler->mmio_enabled)
252 return; 333 return 0;
253 334
254 err_handler = dev->driver->err_handler; 335 err_handler = dev->driver->err_handler;
255 vote = err_handler->mmio_enabled(dev); 336 vote = err_handler->mmio_enabled(dev);
256 result_data->result = merge_result(result_data->result, vote); 337 result_data->result = merge_result(result_data->result, vote);
257 return; 338 return 0;
258} 339}
259 340
260static void report_slot_reset(struct pci_dev *dev, void *data) 341static int report_slot_reset(struct pci_dev *dev, void *data)
261{ 342{
262 pci_ers_result_t vote; 343 pci_ers_result_t vote;
263 struct pci_error_handlers *err_handler; 344 struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data)
267 if (!dev->driver || 348 if (!dev->driver ||
268 !dev->driver->err_handler || 349 !dev->driver->err_handler ||
269 !dev->driver->err_handler->slot_reset) 350 !dev->driver->err_handler->slot_reset)
270 return; 351 return 0;
271 352
272 err_handler = dev->driver->err_handler; 353 err_handler = dev->driver->err_handler;
273 vote = err_handler->slot_reset(dev); 354 vote = err_handler->slot_reset(dev);
274 result_data->result = merge_result(result_data->result, vote); 355 result_data->result = merge_result(result_data->result, vote);
275 return; 356 return 0;
276} 357}
277 358
278static void report_resume(struct pci_dev *dev, void *data) 359static int report_resume(struct pci_dev *dev, void *data)
279{ 360{
280 struct pci_error_handlers *err_handler; 361 struct pci_error_handlers *err_handler;
281 362
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data)
284 if (!dev->driver || 365 if (!dev->driver ||
285 !dev->driver->err_handler || 366 !dev->driver->err_handler ||
286 !dev->driver->err_handler->resume) 367 !dev->driver->err_handler->resume)
287 return; 368 return 0;
288 369
289 err_handler = dev->driver->err_handler; 370 err_handler = dev->driver->err_handler;
290 err_handler->resume(dev); 371 err_handler->resume(dev);
291 return; 372 return 0;
292} 373}
293 374
294/** 375/**
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data)
305static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, 386static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
306 enum pci_channel_state state, 387 enum pci_channel_state state,
307 char *error_mesg, 388 char *error_mesg,
308 void (*cb)(struct pci_dev *, void *)) 389 int (*cb)(struct pci_dev *, void *))
309{ 390{
310 struct aer_broadcast_data result_data; 391 struct aer_broadcast_data result_data;
311 392
@@ -351,21 +432,21 @@ static int find_aer_service_iter(struct device *device, void *data)
351{ 432{
352 struct device_driver *driver; 433 struct device_driver *driver;
353 struct pcie_port_service_driver *service_driver; 434 struct pcie_port_service_driver *service_driver;
354 struct pcie_device *pcie_dev;
355 struct find_aer_service_data *result; 435 struct find_aer_service_data *result;
356 436
357 result = (struct find_aer_service_data *) data; 437 result = (struct find_aer_service_data *) data;
358 438
359 if (device->bus == &pcie_port_bus_type) { 439 if (device->bus == &pcie_port_bus_type) {
360 pcie_dev = to_pcie_device(device); 440 struct pcie_port_data *port_data;
361 if (pcie_dev->id.port_type == PCIE_SW_DOWNSTREAM_PORT) 441
442 port_data = pci_get_drvdata(to_pcie_device(device)->port);
443 if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
362 result->is_downstream = 1; 444 result->is_downstream = 1;
363 445
364 driver = device->driver; 446 driver = device->driver;
365 if (driver) { 447 if (driver) {
366 service_driver = to_service_driver(driver); 448 service_driver = to_service_driver(driver);
367 if (service_driver->id_table->service_type == 449 if (service_driver->service == PCIE_PORT_SERVICE_AER) {
368 PCIE_PORT_SERVICE_AER) {
369 result->aer_driver = service_driver; 450 result->aer_driver = service_driver;
370 return 1; 451 return 1;
371 } 452 }
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
497 */ 578 */
498static void handle_error_source(struct pcie_device * aerdev, 579static void handle_error_source(struct pcie_device * aerdev,
499 struct pci_dev *dev, 580 struct pci_dev *dev,
500 struct aer_err_info info) 581 struct aer_err_info *info)
501{ 582{
502 pci_ers_result_t status = 0; 583 pci_ers_result_t status = 0;
503 int pos; 584 int pos;
504 585
505 if (info.severity == AER_CORRECTABLE) { 586 if (info->severity == AER_CORRECTABLE) {
506 /* 587 /*
507 * Correctable error does not need software intevention. 588 * Correctable error does not need software intevention.
508 * No need to go through error recovery process. 589 * No need to go through error recovery process.
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev,
510 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 591 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
511 if (pos) 592 if (pos)
512 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 593 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
513 info.status); 594 info->status);
514 } else { 595 } else {
515 status = do_recovery(aerdev, dev, info.severity); 596 status = do_recovery(aerdev, dev, info->severity);
516 if (status == PCI_ERS_RESULT_RECOVERED) { 597 if (status == PCI_ERS_RESULT_RECOVERED) {
517 dev_printk(KERN_DEBUG, &dev->dev, "AER driver " 598 dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
518 "successfully recovered\n"); 599 "successfully recovered\n");
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
661 return AER_SUCCESS; 742 return AER_SUCCESS;
662} 743}
663 744
745static inline void aer_process_err_devices(struct pcie_device *p_device,
746 struct aer_err_info *e_info)
747{
748 int i;
749
750 if (!e_info->dev[0]) {
751 dev_printk(KERN_DEBUG, &p_device->port->dev,
752 "can't find device of ID%04x\n",
753 e_info->id);
754 }
755
756 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
757 if (get_device_error_info(e_info->dev[i], e_info) ==
758 AER_SUCCESS) {
759 aer_print_error(e_info->dev[i], e_info);
760 handle_error_source(p_device,
761 e_info->dev[i],
762 e_info);
763 }
764 }
765}
766
664/** 767/**
665 * aer_isr_one_error - consume an error detected by root port 768 * aer_isr_one_error - consume an error detected by root port
666 * @p_device: pointer to error root port service device 769 * @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
669static void aer_isr_one_error(struct pcie_device *p_device, 772static void aer_isr_one_error(struct pcie_device *p_device,
670 struct aer_err_source *e_src) 773 struct aer_err_source *e_src)
671{ 774{
672 struct device *s_device; 775 struct aer_err_info *e_info;
673 struct aer_err_info e_info = {0, 0, 0,};
674 int i; 776 int i;
675 u16 id; 777
778 /* struct aer_err_info might be big, so we allocate it with slab */
779 e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
780 if (e_info == NULL) {
781 dev_printk(KERN_DEBUG, &p_device->port->dev,
782 "Can't allocate mem when processing AER errors\n");
783 return;
784 }
676 785
677 /* 786 /*
678 * There is a possibility that both correctable error and 787 * There is a possibility that both correctable error and
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device,
684 if (!(e_src->status & i)) 793 if (!(e_src->status & i))
685 continue; 794 continue;
686 795
796 memset(e_info, 0, sizeof(struct aer_err_info));
797
687 /* Init comprehensive error information */ 798 /* Init comprehensive error information */
688 if (i & PCI_ERR_ROOT_COR_RCV) { 799 if (i & PCI_ERR_ROOT_COR_RCV) {
689 id = ERR_COR_ID(e_src->id); 800 e_info->id = ERR_COR_ID(e_src->id);
690 e_info.severity = AER_CORRECTABLE; 801 e_info->severity = AER_CORRECTABLE;
691 } else { 802 } else {
692 id = ERR_UNCOR_ID(e_src->id); 803 e_info->id = ERR_UNCOR_ID(e_src->id);
693 e_info.severity = ((e_src->status >> 6) & 1); 804 e_info->severity = ((e_src->status >> 6) & 1);
694 } 805 }
695 if (e_src->status & 806 if (e_src->status &
696 (PCI_ERR_ROOT_MULTI_COR_RCV | 807 (PCI_ERR_ROOT_MULTI_COR_RCV |
697 PCI_ERR_ROOT_MULTI_UNCOR_RCV)) 808 PCI_ERR_ROOT_MULTI_UNCOR_RCV))
698 e_info.flags |= AER_MULTI_ERROR_VALID_FLAG; 809 e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
699 if (!(s_device = find_source_device(p_device->port, id))) { 810
700 printk(KERN_DEBUG "%s->can't find device of ID%04x\n", 811 find_source_device(p_device->port, e_info);
701 __func__, id); 812 aer_process_err_devices(p_device, e_info);
702 continue;
703 }
704 if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
705 AER_SUCCESS) {
706 aer_print_error(to_pci_dev(s_device), &e_info);
707 handle_error_source(p_device,
708 to_pci_dev(s_device),
709 e_info);
710 }
711 } 813 }
814
815 kfree(e_info);
712} 816}
713 817
714/** 818/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000000..ece97df4df6d
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
1/*
2 * Enables/disables PCIe ECRC checking.
3 *
4 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
5 * Andrew Patterson <andrew.patterson@hp.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307, USA.
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/pci.h>
27#include <linux/pci_regs.h>
28#include <linux/errno.h>
29#include "../../pci.h"
30
/* ECRC policy values, selected via the "ecrc=" kernel parameter */
#define ECRC_POLICY_DEFAULT	0	/* ECRC set by BIOS */
#define ECRC_POLICY_OFF		1	/* ECRC off for performance */
#define ECRC_POLICY_ON		2	/* ECRC on for data integrity */

/* Current policy; starts out deferring to whatever the BIOS configured */
static int ecrc_policy = ECRC_POLICY_DEFAULT;

/* Command-line keyword accepted for each policy value */
static const char *ecrc_policy_str[] = {
	[ECRC_POLICY_DEFAULT]	= "bios",
	[ECRC_POLICY_OFF]	= "off",
	[ECRC_POLICY_ON]	= "on"
};
42
43/**
44 * enable_ercr_checking - enable PCIe ECRC checking for a device
45 * @dev: the PCI device
46 *
47 * Returns 0 on success, or negative on failure.
48 */
49static int enable_ecrc_checking(struct pci_dev *dev)
50{
51 int pos;
52 u32 reg32;
53
54 if (!dev->is_pcie)
55 return -ENODEV;
56
57 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
58 if (!pos)
59 return -ENODEV;
60
61 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
62 if (reg32 & PCI_ERR_CAP_ECRC_GENC)
63 reg32 |= PCI_ERR_CAP_ECRC_GENE;
64 if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
65 reg32 |= PCI_ERR_CAP_ECRC_CHKE;
66 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
67
68 return 0;
69}
70
71/**
72 * disable_ercr_checking - disables PCIe ECRC checking for a device
73 * @dev: the PCI device
74 *
75 * Returns 0 on success, or negative on failure.
76 */
77static int disable_ecrc_checking(struct pci_dev *dev)
78{
79 int pos;
80 u32 reg32;
81
82 if (!dev->is_pcie)
83 return -ENODEV;
84
85 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
86 if (!pos)
87 return -ENODEV;
88
89 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
90 reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
91 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
92
93 return 0;
94}
95
96/**
97 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
98 * @dev: the PCI device
99 */
100void pcie_set_ecrc_checking(struct pci_dev *dev)
101{
102 switch (ecrc_policy) {
103 case ECRC_POLICY_DEFAULT:
104 return;
105 case ECRC_POLICY_OFF:
106 disable_ecrc_checking(dev);
107 break;
108 case ECRC_POLICY_ON:
109 enable_ecrc_checking(dev);;
110 break;
111 default:
112 return;
113 }
114}
115
116/**
117 * pcie_ecrc_get_policy - parse kernel command-line ecrc option
118 */
119void pcie_ecrc_get_policy(char *str)
120{
121 int i;
122
123 for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
124 if (!strncmp(str, ecrc_policy_str[i],
125 strlen(ecrc_policy_str[i])))
126 break;
127 if (i >= ARRAY_SIZE(ecrc_policy_str))
128 return;
129
130 ecrc_policy = i;
131}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af4..3d27c97e0486 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
26#endif 26#endif
27#define MODULE_PARAM_PREFIX "pcie_aspm." 27#define MODULE_PARAM_PREFIX "pcie_aspm."
28 28
29struct endpoint_state { 29struct aspm_latency {
30 unsigned int l0s_acceptable_latency; 30 u32 l0s; /* L0s latency (nsec) */
31 unsigned int l1_acceptable_latency; 31 u32 l1; /* L1 latency (nsec) */
32}; 32};
33 33
34struct pcie_link_state { 34struct pcie_link_state {
35 struct list_head sibiling; 35 struct pci_dev *pdev; /* Upstream component of the Link */
36 struct pci_dev *pdev; 36 struct pcie_link_state *root; /* pointer to the root port link */
37 bool downstream_has_switch; 37 struct pcie_link_state *parent; /* pointer to the parent Link state */
38 38 struct list_head sibling; /* node in link_list */
39 struct pcie_link_state *parent; 39 struct list_head children; /* list of child link states */
40 struct list_head children; 40 struct list_head link; /* node in parent's children list */
41 struct list_head link;
42 41
43 /* ASPM state */ 42 /* ASPM state */
44 unsigned int support_state; 43 u32 aspm_support:2; /* Supported ASPM state */
45 unsigned int enabled_state; 44 u32 aspm_enabled:2; /* Enabled ASPM state */
46 unsigned int bios_aspm_state; 45 u32 aspm_default:2; /* Default ASPM state by BIOS */
47 /* upstream component */ 46
48 unsigned int l0s_upper_latency; 47 /* Clock PM state */
49 unsigned int l1_upper_latency; 48 u32 clkpm_capable:1; /* Clock PM capable? */
50 /* downstream component */ 49 u32 clkpm_enabled:1; /* Current Clock PM state */
51 unsigned int l0s_down_latency; 50 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
52 unsigned int l1_down_latency;
53 /* Clock PM state*/
54 unsigned int clk_pm_capable;
55 unsigned int clk_pm_enabled;
56 unsigned int bios_clk_state;
57 51
52 /* Latencies */
53 struct aspm_latency latency; /* Exit latency */
58 /* 54 /*
59 * A pcie downstream port only has one slot under it, so at most there 55 * Endpoint acceptable latencies. A pcie downstream port only
60 * are 8 functions 56 * has one slot under it, so at most there are 8 functions.
61 */ 57 */
62 struct endpoint_state endpoints[8]; 58 struct aspm_latency acceptable[8];
63}; 59};
64 60
65static int aspm_disabled, aspm_force; 61static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@ static const char *policy_str[] = {
78 74
79#define LINK_RETRAIN_TIMEOUT HZ 75#define LINK_RETRAIN_TIMEOUT HZ
80 76
81static int policy_to_aspm_state(struct pci_dev *pdev) 77static int policy_to_aspm_state(struct pcie_link_state *link)
82{ 78{
83 struct pcie_link_state *link_state = pdev->link_state;
84
85 switch (aspm_policy) { 79 switch (aspm_policy) {
86 case POLICY_PERFORMANCE: 80 case POLICY_PERFORMANCE:
87 /* Disable ASPM and Clock PM */ 81 /* Disable ASPM and Clock PM */
88 return 0; 82 return 0;
89 case POLICY_POWERSAVE: 83 case POLICY_POWERSAVE:
90 /* Enable ASPM L0s/L1 */ 84 /* Enable ASPM L0s/L1 */
91 return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 85 return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
92 case POLICY_DEFAULT: 86 case POLICY_DEFAULT:
93 return link_state->bios_aspm_state; 87 return link->aspm_default;
94 } 88 }
95 return 0; 89 return 0;
96} 90}
97 91
98static int policy_to_clkpm_state(struct pci_dev *pdev) 92static int policy_to_clkpm_state(struct pcie_link_state *link)
99{ 93{
100 struct pcie_link_state *link_state = pdev->link_state;
101
102 switch (aspm_policy) { 94 switch (aspm_policy) {
103 case POLICY_PERFORMANCE: 95 case POLICY_PERFORMANCE:
104 /* Disable ASPM and Clock PM */ 96 /* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev)
107 /* Disable Clock PM */ 99 /* Disable Clock PM */
108 return 1; 100 return 1;
109 case POLICY_DEFAULT: 101 case POLICY_DEFAULT:
110 return link_state->bios_clk_state; 102 return link->clkpm_default;
111 } 103 }
112 return 0; 104 return 0;
113} 105}
114 106
115static void pcie_set_clock_pm(struct pci_dev *pdev, int enable) 107static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
116{ 108{
117 struct pci_dev *child_dev;
118 int pos; 109 int pos;
119 u16 reg16; 110 u16 reg16;
120 struct pcie_link_state *link_state = pdev->link_state; 111 struct pci_dev *child;
112 struct pci_bus *linkbus = link->pdev->subordinate;
121 113
122 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 114 list_for_each_entry(child, &linkbus->devices, bus_list) {
123 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 115 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
124 if (!pos) 116 if (!pos)
125 return; 117 return;
126 pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16); 118 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
127 if (enable) 119 if (enable)
128 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; 120 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
129 else 121 else
130 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; 122 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
131 pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16); 123 pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
132 } 124 }
133 link_state->clk_pm_enabled = !!enable; 125 link->clkpm_enabled = !!enable;
134} 126}
135 127
136static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) 128static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
137{ 129{
138 int pos; 130 /* Don't enable Clock PM if the link is not Clock PM capable */
131 if (!link->clkpm_capable && enable)
132 return;
133 /* Need nothing if the specified equals to current state */
134 if (link->clkpm_enabled == enable)
135 return;
136 pcie_set_clkpm_nocheck(link, enable);
137}
138
139static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
140{
141 int pos, capable = 1, enabled = 1;
139 u32 reg32; 142 u32 reg32;
140 u16 reg16; 143 u16 reg16;
141 int capable = 1, enabled = 1; 144 struct pci_dev *child;
142 struct pci_dev *child_dev; 145 struct pci_bus *linkbus = link->pdev->subordinate;
143 struct pcie_link_state *link_state = pdev->link_state;
144 146
145 /* All functions should have the same cap and state, take the worst */ 147 /* All functions should have the same cap and state, take the worst */
146 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 148 list_for_each_entry(child, &linkbus->devices, bus_list) {
147 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 149 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
148 if (!pos) 150 if (!pos)
149 return; 151 return;
150 pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32); 152 pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
151 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { 153 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
152 capable = 0; 154 capable = 0;
153 enabled = 0; 155 enabled = 0;
154 break; 156 break;
155 } 157 }
156 pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16); 158 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
157 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) 159 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
158 enabled = 0; 160 enabled = 0;
159 } 161 }
160 link_state->clk_pm_enabled = enabled; 162 link->clkpm_enabled = enabled;
161 link_state->bios_clk_state = enabled; 163 link->clkpm_default = enabled;
162 if (!blacklist) { 164 link->clkpm_capable = (blacklist) ? 0 : capable;
163 link_state->clk_pm_capable = capable;
164 pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
165 } else {
166 link_state->clk_pm_capable = 0;
167 pcie_set_clock_pm(pdev, 0);
168 }
169} 165}
170 166
171static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) 167static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
172{ 168{
173 struct pci_dev *child_dev; 169 struct pci_dev *child;
170 struct pci_bus *linkbus = link->pdev->subordinate;
174 171
175 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 172 list_for_each_entry(child, &linkbus->devices, bus_list) {
176 if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) 173 if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
177 return true; 174 return true;
178 } 175 }
179 return false; 176 return false;
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
184 * could use common clock. If they are, configure them to use the 181 * could use common clock. If they are, configure them to use the
185 * common clock. That will reduce the ASPM state exit latency. 182 * common clock. That will reduce the ASPM state exit latency.
186 */ 183 */
187static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) 184static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
188{ 185{
189 int pos, child_pos, i = 0; 186 int ppos, cpos, same_clock = 1;
190 u16 reg16 = 0; 187 u16 reg16, parent_reg, child_reg[8];
191 struct pci_dev *child_dev;
192 int same_clock = 1;
193 unsigned long start_jiffies; 188 unsigned long start_jiffies;
194 u16 child_regs[8], parent_reg; 189 struct pci_dev *child, *parent = link->pdev;
190 struct pci_bus *linkbus = parent->subordinate;
195 /* 191 /*
196 * all functions of a slot should have the same Slot Clock 192 * All functions of a slot should have the same Slot Clock
197 * Configuration, so just check one function 193 * Configuration, so just check one function
198 * */ 194 */
199 child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, 195 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
200 bus_list); 196 BUG_ON(!child->is_pcie);
201 BUG_ON(!child_dev->is_pcie);
202 197
203 /* Check downstream component if bit Slot Clock Configuration is 1 */ 198 /* Check downstream component if bit Slot Clock Configuration is 1 */
204 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 199 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
205 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16); 200 pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
206 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 201 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
207 same_clock = 0; 202 same_clock = 0;
208 203
209 /* Check upstream component if bit Slot Clock Configuration is 1 */ 204 /* Check upstream component if bit Slot Clock Configuration is 1 */
210 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 205 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
211 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 206 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
212 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 207 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
213 same_clock = 0; 208 same_clock = 0;
214 209
215 /* Configure downstream component, all functions */ 210 /* Configure downstream component, all functions */
216 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 211 list_for_each_entry(child, &linkbus->devices, bus_list) {
217 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 212 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
218 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 213 pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
219 &reg16); 214 child_reg[PCI_FUNC(child->devfn)] = reg16;
220 child_regs[i] = reg16;
221 if (same_clock) 215 if (same_clock)
222 reg16 |= PCI_EXP_LNKCTL_CCC; 216 reg16 |= PCI_EXP_LNKCTL_CCC;
223 else 217 else
224 reg16 &= ~PCI_EXP_LNKCTL_CCC; 218 reg16 &= ~PCI_EXP_LNKCTL_CCC;
225 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 219 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
226 reg16);
227 i++;
228 } 220 }
229 221
230 /* Configure upstream component */ 222 /* Configure upstream component */
231 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 223 pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
232 parent_reg = reg16; 224 parent_reg = reg16;
233 if (same_clock) 225 if (same_clock)
234 reg16 |= PCI_EXP_LNKCTL_CCC; 226 reg16 |= PCI_EXP_LNKCTL_CCC;
235 else 227 else
236 reg16 &= ~PCI_EXP_LNKCTL_CCC; 228 reg16 &= ~PCI_EXP_LNKCTL_CCC;
237 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 229 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
238 230
239 /* retrain link */ 231 /* Retrain link */
240 reg16 |= PCI_EXP_LNKCTL_RL; 232 reg16 |= PCI_EXP_LNKCTL_RL;
241 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 233 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
242 234
243 /* Wait for link training end */ 235 /* Wait for link training end. Break out after waiting for timeout */
244 /* break out after waiting for timeout */
245 start_jiffies = jiffies; 236 start_jiffies = jiffies;
246 for (;;) { 237 for (;;) {
247 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 238 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
248 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 239 if (!(reg16 & PCI_EXP_LNKSTA_LT))
249 break; 240 break;
250 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) 241 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
251 break; 242 break;
252 msleep(1); 243 msleep(1);
253 } 244 }
254 /* training failed -> recover */ 245 if (!(reg16 & PCI_EXP_LNKSTA_LT))
255 if (reg16 & PCI_EXP_LNKSTA_LT) { 246 return;
256 dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" 247
257 " common clock\n"); 248 /* Training failed. Restore common clock configurations */
258 i = 0; 249 dev_printk(KERN_ERR, &parent->dev,
259 list_for_each_entry(child_dev, &pdev->subordinate->devices, 250 "ASPM: Could not configure common clock\n");
260 bus_list) { 251 list_for_each_entry(child, &linkbus->devices, bus_list) {
261 child_pos = pci_find_capability(child_dev, 252 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
262 PCI_CAP_ID_EXP); 253 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
263 pci_write_config_word(child_dev, 254 child_reg[PCI_FUNC(child->devfn)]);
264 child_pos + PCI_EXP_LNKCTL,
265 child_regs[i]);
266 i++;
267 }
268 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
269 } 255 }
256 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
270} 257}
271 258
272/* 259/* Convert L0s latency encoding to ns */
273 * calc_L0S_latency: Convert L0s latency encoding to ns 260static u32 calc_l0s_latency(u32 encoding)
274 */
275static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
276{ 261{
277 unsigned int ns = 64; 262 if (encoding == 0x7)
263 return (5 * 1000); /* > 4us */
264 return (64 << encoding);
265}
278 266
279 if (latency_encoding == 0x7) { 267/* Convert L0s acceptable latency encoding to ns */
280 if (ac) 268static u32 calc_l0s_acceptable(u32 encoding)
281 ns = -1U; 269{
282 else 270 if (encoding == 0x7)
283 ns = 5*1000; /* > 4us */ 271 return -1U;
284 } else 272 return (64 << encoding);
285 ns *= (1 << latency_encoding);
286 return ns;
287} 273}
288 274
289/* 275/* Convert L1 latency encoding to ns */
290 * calc_L1_latency: Convert L1 latency encoding to ns 276static u32 calc_l1_latency(u32 encoding)
291 */
292static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
293{ 277{
294 unsigned int ns = 1000; 278 if (encoding == 0x7)
279 return (65 * 1000); /* > 64us */
280 return (1000 << encoding);
281}
295 282
296 if (latency_encoding == 0x7) { 283/* Convert L1 acceptable latency encoding to ns */
297 if (ac) 284static u32 calc_l1_acceptable(u32 encoding)
298 ns = -1U; 285{
299 else 286 if (encoding == 0x7)
300 ns = 65*1000; /* > 64us */ 287 return -1U;
301 } else 288 return (1000 << encoding);
302 ns *= (1 << latency_encoding);
303 return ns;
304} 289}
305 290
306static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, 291static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
307 unsigned int *l0s, unsigned int *l1, unsigned int *enabled) 292 u32 *l0s, u32 *l1, u32 *enabled)
308{ 293{
309 int pos; 294 int pos;
310 u16 reg16; 295 u16 reg16;
311 u32 reg32; 296 u32 reg32, encoding;
312 unsigned int latency;
313 297
298 *l0s = *l1 = *enabled = 0;
314 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 299 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
315 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); 300 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
316 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 301 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
317 if (*state != PCIE_LINK_STATE_L0S && 302 if (*state != PCIE_LINK_STATE_L0S &&
318 *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S)) 303 *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
319 *state = 0; 304 *state = 0;
320 if (*state == 0) 305 if (*state == 0)
321 return; 306 return;
322 307
323 latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 308 encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
324 *l0s = calc_L0S_latency(latency, 0); 309 *l0s = calc_l0s_latency(encoding);
325 if (*state & PCIE_LINK_STATE_L1) { 310 if (*state & PCIE_LINK_STATE_L1) {
326 latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 311 encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
327 *l1 = calc_L1_latency(latency, 0); 312 *l1 = calc_l1_latency(encoding);
328 } 313 }
329 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 314 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
330 *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1); 315 *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
331} 316}
332 317
333static void pcie_aspm_cap_init(struct pci_dev *pdev) 318static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
334{ 319{
335 struct pci_dev *child_dev; 320 u32 support, l0s, l1, enabled;
336 u32 state, tmp; 321 struct pci_dev *child, *parent = link->pdev;
337 struct pcie_link_state *link_state = pdev->link_state; 322 struct pci_bus *linkbus = parent->subordinate;
323
324 if (blacklist) {
325 /* Set support state to 0, so we will disable ASPM later */
326 link->aspm_support = 0;
327 link->aspm_default = 0;
328 link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
329 return;
330 }
331
332 /* Configure common clock before checking latencies */
333 pcie_aspm_configure_common_clock(link);
338 334
339 /* upstream component states */ 335 /* upstream component states */
340 pcie_aspm_get_cap_device(pdev, &link_state->support_state, 336 pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
341 &link_state->l0s_upper_latency, 337 link->aspm_support = support;
342 &link_state->l1_upper_latency, 338 link->latency.l0s = l0s;
343 &link_state->enabled_state); 339 link->latency.l1 = l1;
340 link->aspm_enabled = enabled;
341
344 /* downstream component states, all functions have the same setting */ 342 /* downstream component states, all functions have the same setting */
345 child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, 343 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
346 bus_list); 344 pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
347 pcie_aspm_get_cap_device(child_dev, &state, 345 link->aspm_support &= support;
348 &link_state->l0s_down_latency, 346 link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
349 &link_state->l1_down_latency, 347 link->latency.l1 = max_t(u32, link->latency.l1, l1);
350 &tmp); 348
351 link_state->support_state &= state; 349 if (!link->aspm_support)
352 if (!link_state->support_state)
353 return; 350 return;
354 link_state->enabled_state &= link_state->support_state; 351
355 link_state->bios_aspm_state = link_state->enabled_state; 352 link->aspm_enabled &= link->aspm_support;
353 link->aspm_default = link->aspm_enabled;
356 354
357 /* ENDPOINT states*/ 355 /* ENDPOINT states*/
358 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 356 list_for_each_entry(child, &linkbus->devices, bus_list) {
359 int pos; 357 int pos;
360 u32 reg32; 358 u32 reg32, encoding;
361 unsigned int latency; 359 struct aspm_latency *acceptable =
362 struct endpoint_state *ep_state = 360 &link->acceptable[PCI_FUNC(child->devfn)];
363 &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
364 361
365 if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && 362 if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
366 child_dev->pcie_type != PCI_EXP_TYPE_LEG_END) 363 child->pcie_type != PCI_EXP_TYPE_LEG_END)
367 continue; 364 continue;
368 365
369 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 366 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
370 pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32); 367 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
371 latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 368 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
372 latency = calc_L0S_latency(latency, 1); 369 acceptable->l0s = calc_l0s_acceptable(encoding);
373 ep_state->l0s_acceptable_latency = latency; 370 if (link->aspm_support & PCIE_LINK_STATE_L1) {
374 if (link_state->support_state & PCIE_LINK_STATE_L1) { 371 encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
375 latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; 372 acceptable->l1 = calc_l1_acceptable(encoding);
376 latency = calc_L1_latency(latency, 1);
377 ep_state->l1_acceptable_latency = latency;
378 } 373 }
379 } 374 }
380} 375}
381 376
382static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev, 377/**
383 unsigned int state) 378 * __pcie_aspm_check_state_one - check latency for endpoint device.
384{ 379 * @endpoint: pointer to the struct pci_dev of endpoint device
385 struct pci_dev *parent_dev, *tmp_dev; 380 *
386 unsigned int latency, l1_latency = 0; 381 * TBD: The latency from the endpoint to root complex vary per switch's
387 struct pcie_link_state *link_state; 382 * upstream link state above the device. Here we just do a simple check
388 struct endpoint_state *ep_state; 383 * which assumes all links above the device can be in L1 state, that
389 384 * is we just consider the worst case. If switch's upstream link can't
390 parent_dev = pdev->bus->self; 385 * be put into L0S/L1, then our check is too strictly.
391 link_state = parent_dev->link_state; 386 */
392 state &= link_state->support_state; 387static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
393 if (state == 0) 388{
394 return 0; 389 u32 l1_switch_latency = 0;
395 ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)]; 390 struct aspm_latency *acceptable;
396 391 struct pcie_link_state *link;
397 /* 392
398 * Check latency for endpoint device. 393 link = endpoint->bus->self->link_state;
399 * TBD: The latency from the endpoint to root complex vary per 394 state &= link->aspm_support;
400 * switch's upstream link state above the device. Here we just do a 395 acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
401 * simple check which assumes all links above the device can be in L1 396
402 * state, that is we just consider the worst case. If switch's upstream 397 while (link && state) {
403 * link can't be put into L0S/L1, then our check is too strictly. 398 if ((state & PCIE_LINK_STATE_L0S) &&
404 */ 399 (link->latency.l0s > acceptable->l0s))
405 tmp_dev = pdev; 400 state &= ~PCIE_LINK_STATE_L0S;
406 while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { 401 if ((state & PCIE_LINK_STATE_L1) &&
407 parent_dev = tmp_dev->bus->self; 402 (link->latency.l1 + l1_switch_latency > acceptable->l1))
408 link_state = parent_dev->link_state; 403 state &= ~PCIE_LINK_STATE_L1;
409 if (state & PCIE_LINK_STATE_L0S) { 404 link = link->parent;
410 latency = max_t(unsigned int, 405 /*
411 link_state->l0s_upper_latency, 406 * Every switch on the path to root complex need 1
412 link_state->l0s_down_latency); 407 * more microsecond for L1. Spec doesn't mention L0s.
413 if (latency > ep_state->l0s_acceptable_latency) 408 */
414 state &= ~PCIE_LINK_STATE_L0S; 409 l1_switch_latency += 1000;
415 }
416 if (state & PCIE_LINK_STATE_L1) {
417 latency = max_t(unsigned int,
418 link_state->l1_upper_latency,
419 link_state->l1_down_latency);
420 if (latency + l1_latency >
421 ep_state->l1_acceptable_latency)
422 state &= ~PCIE_LINK_STATE_L1;
423 }
424 if (!parent_dev->bus->self) /* parent_dev is a root port */
425 break;
426 else {
427 /*
428 * parent_dev is the downstream port of a switch, make
429 * tmp_dev the upstream port of the switch
430 */
431 tmp_dev = parent_dev->bus->self;
432 /*
433 * every switch on the path to root complex need 1 more
434 * microsecond for L1. Spec doesn't mention L0S.
435 */
436 if (state & PCIE_LINK_STATE_L1)
437 l1_latency += 1000;
438 }
439 } 410 }
440 return state; 411 return state;
441} 412}
442 413
443static unsigned int pcie_aspm_check_state(struct pci_dev *pdev, 414static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
444 unsigned int state)
445{ 415{
446 struct pci_dev *child_dev; 416 pci_power_t power_state;
417 struct pci_dev *child;
418 struct pci_bus *linkbus = link->pdev->subordinate;
447 419
448 /* If no child, ignore the link */ 420 /* If no child, ignore the link */
449 if (list_empty(&pdev->subordinate->devices)) 421 if (list_empty(&linkbus->devices))
450 return state; 422 return state;
451 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 423
452 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 424 list_for_each_entry(child, &linkbus->devices, bus_list) {
453 /* 425 /*
454 * If downstream component of a link is pci bridge, we 426 * If downstream component of a link is pci bridge, we
455 * disable ASPM for now for the link 427 * disable ASPM for now for the link
456 * */ 428 */
457 state = 0; 429 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
458 break; 430 return 0;
459 } 431
460 if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && 432 if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
461 child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)) 433 child->pcie_type != PCI_EXP_TYPE_LEG_END))
462 continue; 434 continue;
463 /* Device not in D0 doesn't need check latency */ 435 /* Device not in D0 doesn't need check latency */
464 if (child_dev->current_state == PCI_D1 || 436 power_state = child->current_state;
465 child_dev->current_state == PCI_D2 || 437 if (power_state == PCI_D1 || power_state == PCI_D2 ||
466 child_dev->current_state == PCI_D3hot || 438 power_state == PCI_D3hot || power_state == PCI_D3cold)
467 child_dev->current_state == PCI_D3cold)
468 continue; 439 continue;
469 state = __pcie_aspm_check_state_one(child_dev, state); 440 state = __pcie_aspm_check_state_one(child, state);
470 } 441 }
471 return state; 442 return state;
472} 443}
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
482 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 453 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
483} 454}
484 455
485static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) 456static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
486{ 457{
487 struct pci_dev *child_dev; 458 struct pci_dev *child, *parent = link->pdev;
488 int valid = 1; 459 struct pci_bus *linkbus = parent->subordinate;
489 struct pcie_link_state *link_state = pdev->link_state;
490 460
491 /* If no child, disable the link */ 461 /* If no child, disable the link */
492 if (list_empty(&pdev->subordinate->devices)) 462 if (list_empty(&linkbus->devices))
493 state = 0; 463 state = 0;
494 /* 464 /*
495 * if the downstream component has pci bridge function, don't do ASPM 465 * If the downstream component has pci bridge function, don't
496 * now 466 * do ASPM now.
497 */ 467 */
498 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 468 list_for_each_entry(child, &linkbus->devices, bus_list) {
499 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 469 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
500 valid = 0; 470 return;
501 break;
502 }
503 } 471 }
504 if (!valid)
505 return;
506
507 /* 472 /*
508 * spec 2.0 suggests all functions should be configured the same 473 * Spec 2.0 suggests all functions should be configured the
509 * setting for ASPM. Enabling ASPM L1 should be done in upstream 474 * same setting for ASPM. Enabling ASPM L1 should be done in
510 * component first and then downstream, and vice versa for disabling 475 * upstream component first and then downstream, and vice
511 * ASPM L1. Spec doesn't mention L0S. 476 * versa for disabling ASPM L1. Spec doesn't mention L0S.
512 */ 477 */
513 if (state & PCIE_LINK_STATE_L1) 478 if (state & PCIE_LINK_STATE_L1)
514 __pcie_aspm_config_one_dev(pdev, state); 479 __pcie_aspm_config_one_dev(parent, state);
515 480
516 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) 481 list_for_each_entry(child, &linkbus->devices, bus_list)
517 __pcie_aspm_config_one_dev(child_dev, state); 482 __pcie_aspm_config_one_dev(child, state);
518 483
519 if (!(state & PCIE_LINK_STATE_L1)) 484 if (!(state & PCIE_LINK_STATE_L1))
520 __pcie_aspm_config_one_dev(pdev, state); 485 __pcie_aspm_config_one_dev(parent, state);
521 486
522 link_state->enabled_state = state; 487 link->aspm_enabled = state;
523} 488}
524 489
525static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) 490/* Check the whole hierarchy, and configure each link in the hierarchy */
491static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
492 u32 state)
526{ 493{
527 struct pcie_link_state *root_port_link = link; 494 struct pcie_link_state *leaf, *root = link->root;
528 while (root_port_link->parent)
529 root_port_link = root_port_link->parent;
530 return root_port_link;
531}
532 495
533/* check the whole hierarchy, and configure each link in the hierarchy */ 496 state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
534static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
535 unsigned int state)
536{
537 struct pcie_link_state *link_state = pdev->link_state;
538 struct pcie_link_state *root_port_link = get_root_port_link(link_state);
539 struct pcie_link_state *leaf;
540 497
541 state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 498 /* Check all links who have specific root port link */
542 499 list_for_each_entry(leaf, &link_list, sibling) {
543 /* check all links who have specific root port link */ 500 if (!list_empty(&leaf->children) || (leaf->root != root))
544 list_for_each_entry(leaf, &link_list, sibiling) {
545 if (!list_empty(&leaf->children) ||
546 get_root_port_link(leaf) != root_port_link)
547 continue; 501 continue;
548 state = pcie_aspm_check_state(leaf->pdev, state); 502 state = pcie_aspm_check_state(leaf, state);
549 } 503 }
550 /* check root port link too in case it hasn't children */ 504 /* Check root port link too in case it hasn't children */
551 state = pcie_aspm_check_state(root_port_link->pdev, state); 505 state = pcie_aspm_check_state(root, state);
552 506 if (link->aspm_enabled == state)
553 if (link_state->enabled_state == state)
554 return; 507 return;
555
556 /* 508 /*
557 * we must change the hierarchy. See comments in 509 * We must change the hierarchy. See comments in
558 * __pcie_aspm_config_link for the order 510 * __pcie_aspm_config_link for the order
559 **/ 511 **/
560 if (state & PCIE_LINK_STATE_L1) { 512 if (state & PCIE_LINK_STATE_L1) {
561 list_for_each_entry(leaf, &link_list, sibiling) { 513 list_for_each_entry(leaf, &link_list, sibling) {
562 if (get_root_port_link(leaf) == root_port_link) 514 if (leaf->root == root)
563 __pcie_aspm_config_link(leaf->pdev, state); 515 __pcie_aspm_config_link(leaf, state);
564 } 516 }
565 } else { 517 } else {
566 list_for_each_entry_reverse(leaf, &link_list, sibiling) { 518 list_for_each_entry_reverse(leaf, &link_list, sibling) {
567 if (get_root_port_link(leaf) == root_port_link) 519 if (leaf->root == root)
568 __pcie_aspm_config_link(leaf->pdev, state); 520 __pcie_aspm_config_link(leaf, state);
569 } 521 }
570 } 522 }
571} 523}
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
574 * pcie_aspm_configure_link_state: enable/disable PCI express link state 526 * pcie_aspm_configure_link_state: enable/disable PCI express link state
575 * @pdev: the root port or switch downstream port 527 * @pdev: the root port or switch downstream port
576 */ 528 */
577static void pcie_aspm_configure_link_state(struct pci_dev *pdev, 529static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
578 unsigned int state) 530 u32 state)
579{ 531{
580 down_read(&pci_bus_sem); 532 down_read(&pci_bus_sem);
581 mutex_lock(&aspm_lock); 533 mutex_lock(&aspm_lock);
582 __pcie_aspm_configure_link_state(pdev, state); 534 __pcie_aspm_configure_link_state(link, state);
583 mutex_unlock(&aspm_lock); 535 mutex_unlock(&aspm_lock);
584 up_read(&pci_bus_sem); 536 up_read(&pci_bus_sem);
585} 537}
586 538
587static void free_link_state(struct pci_dev *pdev) 539static void free_link_state(struct pcie_link_state *link)
588{ 540{
589 kfree(pdev->link_state); 541 link->pdev->link_state = NULL;
590 pdev->link_state = NULL; 542 kfree(link);
591} 543}
592 544
593static int pcie_aspm_sanity_check(struct pci_dev *pdev) 545static int pcie_aspm_sanity_check(struct pci_dev *pdev)
594{ 546{
595 struct pci_dev *child_dev; 547 struct pci_dev *child;
596 int child_pos; 548 int pos;
597 u32 reg32; 549 u32 reg32;
598
599 /* 550 /*
600 * Some functions in a slot might not all be PCIE functions, very 551 * Some functions in a slot might not all be PCIE functions,
601 * strange. Disable ASPM for the whole slot 552 * very strange. Disable ASPM for the whole slot
602 */ 553 */
603 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 554 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
604 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 555 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
605 if (!child_pos) 556 if (!pos)
606 return -EINVAL; 557 return -EINVAL;
607
608 /* 558 /*
609 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use 559 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
610 * RBER bit to determine if a function is 1.1 version device 560 * RBER bit to determine if a function is 1.1 version device
611 */ 561 */
612 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, 562 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
613 &reg32);
614 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { 563 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
615 dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM" 564 dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
616 " on pre-1.1 PCIe device. You can enable it" 565 " on pre-1.1 PCIe device. You can enable it"
617 " with 'pcie_aspm=force'\n"); 566 " with 'pcie_aspm=force'\n");
618 return -EINVAL; 567 return -EINVAL;
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
621 return 0; 570 return 0;
622} 571}
623 572
573static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
574{
575 struct pcie_link_state *link;
576 int blacklist = !!pcie_aspm_sanity_check(pdev);
577
578 link = kzalloc(sizeof(*link), GFP_KERNEL);
579 if (!link)
580 return NULL;
581 INIT_LIST_HEAD(&link->sibling);
582 INIT_LIST_HEAD(&link->children);
583 INIT_LIST_HEAD(&link->link);
584 link->pdev = pdev;
585 if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
586 struct pcie_link_state *parent;
587 parent = pdev->bus->parent->self->link_state;
588 if (!parent) {
589 kfree(link);
590 return NULL;
591 }
592 link->parent = parent;
593 list_add(&link->link, &parent->children);
594 }
595 /* Setup a pointer to the root port link */
596 if (!link->parent)
597 link->root = link;
598 else
599 link->root = link->parent->root;
600
601 list_add(&link->sibling, &link_list);
602
603 pdev->link_state = link;
604
605 /* Check ASPM capability */
606 pcie_aspm_cap_init(link, blacklist);
607
608 /* Check Clock PM capability */
609 pcie_clkpm_cap_init(link, blacklist);
610
611 return link;
612}
613
624/* 614/*
625 * pcie_aspm_init_link_state: Initiate PCI express link state. 615 * pcie_aspm_init_link_state: Initiate PCI express link state.
626 * It is called after the pcie and its children devices are scaned. 616 * It is called after the pcie and its children devices are scaned.
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
628 */ 618 */
629void pcie_aspm_init_link_state(struct pci_dev *pdev) 619void pcie_aspm_init_link_state(struct pci_dev *pdev)
630{ 620{
631 unsigned int state; 621 u32 state;
632 struct pcie_link_state *link_state; 622 struct pcie_link_state *link;
633 int error = 0;
634 int blacklist;
635 623
636 if (aspm_disabled || !pdev->is_pcie || pdev->link_state) 624 if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
637 return; 625 return;
638 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 626 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
639 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 627 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
628 return;
629
630 /* VIA has a strange chipset, root port is under a bridge */
631 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
632 pdev->bus->self)
640 return; 633 return;
634
641 down_read(&pci_bus_sem); 635 down_read(&pci_bus_sem);
642 if (list_empty(&pdev->subordinate->devices)) 636 if (list_empty(&pdev->subordinate->devices))
643 goto out; 637 goto out;
644 638
645 blacklist = !!pcie_aspm_sanity_check(pdev);
646
647 mutex_lock(&aspm_lock); 639 mutex_lock(&aspm_lock);
648 640 link = pcie_aspm_setup_link_state(pdev);
649 link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); 641 if (!link)
650 if (!link_state) 642 goto unlock;
651 goto unlock_out; 643 /*
652 644 * Setup initial ASPM state
653 link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev); 645 *
654 INIT_LIST_HEAD(&link_state->children); 646 * If link has switch, delay the link config. The leaf link
655 INIT_LIST_HEAD(&link_state->link); 647 * initialization will config the whole hierarchy. But we must
656 if (pdev->bus->self) {/* this is a switch */ 648 * make sure BIOS doesn't set unsupported link state.
657 struct pcie_link_state *parent_link_state; 649 */
658 650 if (pcie_aspm_downstream_has_switch(link)) {
659 parent_link_state = pdev->bus->parent->self->link_state; 651 state = pcie_aspm_check_state(link, link->aspm_default);
660 if (!parent_link_state) { 652 __pcie_aspm_config_link(link, state);
661 kfree(link_state);
662 goto unlock_out;
663 }
664 list_add(&link_state->link, &parent_link_state->children);
665 link_state->parent = parent_link_state;
666 }
667
668 pdev->link_state = link_state;
669
670 if (!blacklist) {
671 pcie_aspm_configure_common_clock(pdev);
672 pcie_aspm_cap_init(pdev);
673 } else { 653 } else {
674 link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 654 state = policy_to_aspm_state(link);
675 link_state->bios_aspm_state = 0; 655 __pcie_aspm_configure_link_state(link, state);
676 /* Set support state to 0, so we will disable ASPM later */
677 link_state->support_state = 0;
678 } 656 }
679 657
680 link_state->pdev = pdev; 658 /* Setup initial Clock PM state */
681 list_add(&link_state->sibiling, &link_list); 659 state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
682 660 pcie_set_clkpm(link, state);
683 if (link_state->downstream_has_switch) { 661unlock:
684 /*
685 * If link has switch, delay the link config. The leaf link
686 * initialization will config the whole hierarchy. but we must
687 * make sure BIOS doesn't set unsupported link state
688 **/
689 state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
690 __pcie_aspm_config_link(pdev, state);
691 } else
692 __pcie_aspm_configure_link_state(pdev,
693 policy_to_aspm_state(pdev));
694
695 pcie_check_clock_pm(pdev, blacklist);
696
697unlock_out:
698 if (error)
699 free_link_state(pdev);
700 mutex_unlock(&aspm_lock); 662 mutex_unlock(&aspm_lock);
701out: 663out:
702 up_read(&pci_bus_sem); 664 up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
725 687
726 /* All functions are removed, so just disable ASPM for the link */ 688 /* All functions are removed, so just disable ASPM for the link */
727 __pcie_aspm_config_one_dev(parent, 0); 689 __pcie_aspm_config_one_dev(parent, 0);
728 list_del(&link_state->sibiling); 690 list_del(&link_state->sibling);
729 list_del(&link_state->link); 691 list_del(&link_state->link);
730 /* Clock PM is for endpoint device */ 692 /* Clock PM is for endpoint device */
731 693
732 free_link_state(parent); 694 free_link_state(link_state);
733out: 695out:
734 mutex_unlock(&aspm_lock); 696 mutex_unlock(&aspm_lock);
735 up_read(&pci_bus_sem); 697 up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
749 * devices changed PM state, we should recheck if latency meets all 711 * devices changed PM state, we should recheck if latency meets all
750 * functions' requirement 712 * functions' requirement
751 */ 713 */
752 pcie_aspm_configure_link_state(pdev, link_state->enabled_state); 714 pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
753} 715}
754 716
755/* 717/*
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
772 down_read(&pci_bus_sem); 734 down_read(&pci_bus_sem);
773 mutex_lock(&aspm_lock); 735 mutex_lock(&aspm_lock);
774 link_state = parent->link_state; 736 link_state = parent->link_state;
775 link_state->support_state &= 737 link_state->aspm_support &= ~state;
776 ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1)); 738 __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
777 if (state & PCIE_LINK_STATE_CLKPM) 739 if (state & PCIE_LINK_STATE_CLKPM) {
778 link_state->clk_pm_capable = 0; 740 link_state->clkpm_capable = 0;
779 741 pcie_set_clkpm(link_state, 0);
780 __pcie_aspm_configure_link_state(parent, link_state->enabled_state); 742 }
781 if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
782 pcie_set_clock_pm(parent, 0);
783 mutex_unlock(&aspm_lock); 743 mutex_unlock(&aspm_lock);
784 up_read(&pci_bus_sem); 744 up_read(&pci_bus_sem);
785} 745}
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state);
788static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) 748static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
789{ 749{
790 int i; 750 int i;
791 struct pci_dev *pdev;
792 struct pcie_link_state *link_state; 751 struct pcie_link_state *link_state;
793 752
794 for (i = 0; i < ARRAY_SIZE(policy_str); i++) 753 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
802 down_read(&pci_bus_sem); 761 down_read(&pci_bus_sem);
803 mutex_lock(&aspm_lock); 762 mutex_lock(&aspm_lock);
804 aspm_policy = i; 763 aspm_policy = i;
805 list_for_each_entry(link_state, &link_list, sibiling) { 764 list_for_each_entry(link_state, &link_list, sibling) {
806 pdev = link_state->pdev; 765 __pcie_aspm_configure_link_state(link_state,
807 __pcie_aspm_configure_link_state(pdev, 766 policy_to_aspm_state(link_state));
808 policy_to_aspm_state(pdev)); 767 pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
809 if (link_state->clk_pm_capable &&
810 link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
811 pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
812
813 } 768 }
814 mutex_unlock(&aspm_lock); 769 mutex_unlock(&aspm_lock);
815 up_read(&pci_bus_sem); 770 up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev,
838 struct pci_dev *pci_device = to_pci_dev(dev); 793 struct pci_dev *pci_device = to_pci_dev(dev);
839 struct pcie_link_state *link_state = pci_device->link_state; 794 struct pcie_link_state *link_state = pci_device->link_state;
840 795
841 return sprintf(buf, "%d\n", link_state->enabled_state); 796 return sprintf(buf, "%d\n", link_state->aspm_enabled);
842} 797}
843 798
844static ssize_t link_state_store(struct device *dev, 799static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev,
846 const char *buf, 801 const char *buf,
847 size_t n) 802 size_t n)
848{ 803{
849 struct pci_dev *pci_device = to_pci_dev(dev); 804 struct pci_dev *pdev = to_pci_dev(dev);
850 int state; 805 int state;
851 806
852 if (n < 1) 807 if (n < 1)
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev,
854 state = buf[0]-'0'; 809 state = buf[0]-'0';
855 if (state >= 0 && state <= 3) { 810 if (state >= 0 && state <= 3) {
856 /* setup link aspm state */ 811 /* setup link aspm state */
857 pcie_aspm_configure_link_state(pci_device, state); 812 pcie_aspm_configure_link_state(pdev->link_state, state);
858 return n; 813 return n;
859 } 814 }
860 815
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev,
868 struct pci_dev *pci_device = to_pci_dev(dev); 823 struct pci_dev *pci_device = to_pci_dev(dev);
869 struct pcie_link_state *link_state = pci_device->link_state; 824 struct pcie_link_state *link_state = pci_device->link_state;
870 825
871 return sprintf(buf, "%d\n", link_state->clk_pm_enabled); 826 return sprintf(buf, "%d\n", link_state->clkpm_enabled);
872} 827}
873 828
874static ssize_t clk_ctl_store(struct device *dev, 829static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev,
876 const char *buf, 831 const char *buf,
877 size_t n) 832 size_t n)
878{ 833{
879 struct pci_dev *pci_device = to_pci_dev(dev); 834 struct pci_dev *pdev = to_pci_dev(dev);
880 int state; 835 int state;
881 836
882 if (n < 1) 837 if (n < 1)
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev,
885 840
886 down_read(&pci_bus_sem); 841 down_read(&pci_bus_sem);
887 mutex_lock(&aspm_lock); 842 mutex_lock(&aspm_lock);
888 pcie_set_clock_pm(pci_device, !!state); 843 pcie_set_clkpm_nocheck(pdev->link_state, !!state);
889 mutex_unlock(&aspm_lock); 844 mutex_unlock(&aspm_lock);
890 up_read(&pci_bus_sem); 845 up_read(&pci_bus_sem);
891 846
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
904 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 859 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
905 return; 860 return;
906 861
907 if (link_state->support_state) 862 if (link_state->aspm_support)
908 sysfs_add_file_to_group(&pdev->dev.kobj, 863 sysfs_add_file_to_group(&pdev->dev.kobj,
909 &dev_attr_link_state.attr, power_group); 864 &dev_attr_link_state.attr, power_group);
910 if (link_state->clk_pm_capable) 865 if (link_state->clkpm_capable)
911 sysfs_add_file_to_group(&pdev->dev.kobj, 866 sysfs_add_file_to_group(&pdev->dev.kobj,
912 &dev_attr_clk_ctl.attr, power_group); 867 &dev_attr_clk_ctl.attr, power_group);
913} 868}
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
920 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 875 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
921 return; 876 return;
922 877
923 if (link_state->support_state) 878 if (link_state->aspm_support)
924 sysfs_remove_file_from_group(&pdev->dev.kobj, 879 sysfs_remove_file_from_group(&pdev->dev.kobj,
925 &dev_attr_link_state.attr, power_group); 880 &dev_attr_link_state.attr, power_group);
926 if (link_state->clk_pm_capable) 881 if (link_state->clkpm_capable)
927 sysfs_remove_file_from_group(&pdev->dev.kobj, 882 sysfs_remove_file_from_group(&pdev->dev.kobj,
928 &dev_attr_clk_ctl.attr, power_group); 883 &dev_attr_clk_ctl.attr, power_group);
929} 884}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2529f3f2ea5a..17ad53868f9f 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,19 +25,21 @@
25#define PCIE_CAPABILITIES_REG 0x2 25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14 26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4 27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
29/*
30 * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
31 * table entires used by port services must not exceed 31
32 */
33#define PCIE_PORT_MAX_MSIX_ENTRIES 32
28 34
29#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 35#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
30 36
31struct pcie_port_device_ext {
32 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
33};
34
35extern struct bus_type pcie_port_bus_type; 37extern struct bus_type pcie_port_bus_type;
36extern int pcie_port_device_probe(struct pci_dev *dev); 38extern int pcie_port_device_probe(struct pci_dev *dev);
37extern int pcie_port_device_register(struct pci_dev *dev); 39extern int pcie_port_device_register(struct pci_dev *dev);
38#ifdef CONFIG_PM 40#ifdef CONFIG_PM
39extern int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state); 41extern int pcie_port_device_suspend(struct device *dev);
40extern int pcie_port_device_resume(struct pci_dev *dev); 42extern int pcie_port_device_resume(struct device *dev);
41#endif 43#endif
42extern void pcie_port_device_remove(struct pci_dev *dev); 44extern void pcie_port_device_remove(struct pci_dev *dev);
43extern int __must_check pcie_port_bus_register(void); 45extern int __must_check pcie_port_bus_register(void);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index eec89b767f9f..ef3a4eeaebb4 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,20 +26,22 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) 26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
27{ 27{
28 struct pcie_device *pciedev; 28 struct pcie_device *pciedev;
29 struct pcie_port_data *port_data;
29 struct pcie_port_service_driver *driver; 30 struct pcie_port_service_driver *driver;
30 31
31 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) 32 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
32 return 0; 33 return 0;
33 34
34 pciedev = to_pcie_device(dev); 35 pciedev = to_pcie_device(dev);
35 driver = to_service_driver(drv); 36 driver = to_service_driver(drv);
36 if ( (driver->id_table->vendor != PCI_ANY_ID && 37
37 driver->id_table->vendor != pciedev->id.vendor) || 38 if (driver->service != pciedev->service)
38 (driver->id_table->device != PCI_ANY_ID && 39 return 0;
39 driver->id_table->device != pciedev->id.device) || 40
40 (driver->id_table->port_type != PCIE_ANY_PORT && 41 port_data = pci_get_drvdata(pciedev->port);
41 driver->id_table->port_type != pciedev->id.port_type) || 42
42 driver->id_table->service_type != pciedev->id.service_type ) 43 if (driver->port_type != PCIE_ANY_PORT
44 && driver->port_type != port_data->port_type)
43 return 0; 45 return 0;
44 46
45 return 1; 47 return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 8b3f8c18032f..13ffdc35ea0e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,10 +15,9 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
17 17
18#include "../pci.h"
18#include "portdrv.h" 19#include "portdrv.h"
19 20
20extern int pcie_mch_quirk; /* MSI-quirk Indicator */
21
22/** 21/**
23 * release_pcie_device - free PCI Express port service device structure 22 * release_pcie_device - free PCI Express port service device structure
24 * @dev: Port service device to release 23 * @dev: Port service device to release
@@ -31,26 +30,150 @@ static void release_pcie_device(struct device *dev)
31 kfree(to_pcie_device(dev)); 30 kfree(to_pcie_device(dev));
32} 31}
33 32
34static int is_msi_quirked(struct pci_dev *dev) 33/**
34 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
35 * @entries: Array of MSI-X entries
36 * @new_entry: Index of the entry to add to the array
37 * @nr_entries: Number of entries aleady in the array
38 *
39 * Return value: Position of the added entry in the array
40 */
41static int pcie_port_msix_add_entry(
42 struct msix_entry *entries, int new_entry, int nr_entries)
35{ 43{
36 int port_type, quirk = 0; 44 int j;
45
46 for (j = 0; j < nr_entries; j++)
47 if (entries[j].entry == new_entry)
48 return j;
49
50 entries[j].entry = new_entry;
51 return j;
52}
53
54/**
55 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
56 * @dev: PCI Express port to handle
57 * @vectors: Array of interrupt vectors to populate
58 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
59 *
60 * Return value: 0 on success, error code on failure
61 */
62static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
63{
64 struct msix_entry *msix_entries;
65 int idx[PCIE_PORT_DEVICE_MAXSERVICES];
66 int nr_entries, status, pos, i, nvec;
37 u16 reg16; 67 u16 reg16;
68 u32 reg32;
38 69
39 pci_read_config_word(dev, 70 nr_entries = pci_msix_table_size(dev);
40 pci_find_capability(dev, PCI_CAP_ID_EXP) + 71 if (!nr_entries)
41 PCIE_CAPABILITIES_REG, &reg16); 72 return -EINVAL;
42 port_type = (reg16 >> 4) & PORT_TYPE_MASK; 73 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
43 switch(port_type) { 74 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
44 case PCIE_RC_PORT: 75
45 if (pcie_mch_quirk == 1) 76 msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
46 quirk = 1; 77 if (!msix_entries)
47 break; 78 return -ENOMEM;
48 case PCIE_SW_UPSTREAM_PORT: 79
49 case PCIE_SW_DOWNSTREAM_PORT: 80 /*
50 default: 81 * Allocate as many entries as the port wants, so that we can check
51 break; 82 * which of them will be useful. Moreover, if nr_entries is correctly
83 * equal to the number of entries this port actually uses, we'll happily
84 * go through without any tricks.
85 */
86 for (i = 0; i < nr_entries; i++)
87 msix_entries[i].entry = i;
88
89 status = pci_enable_msix(dev, msix_entries, nr_entries);
90 if (status)
91 goto Exit;
92
93 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
94 idx[i] = -1;
95 status = -EIO;
96 nvec = 0;
97
98 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
99 int entry;
100
101 /*
102 * The code below follows the PCI Express Base Specification 2.0
103 * stating in Section 6.1.6 that "PME and Hot-Plug Event
104 * interrupts (when both are implemented) always share the same
105 * MSI or MSI-X vector, as indicated by the Interrupt Message
106 * Number field in the PCI Express Capabilities register", where
107 * according to Section 7.8.2 of the specification "For MSI-X,
108 * the value in this field indicates which MSI-X Table entry is
109 * used to generate the interrupt message."
110 */
111 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
112 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
113 entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
114 if (entry >= nr_entries)
115 goto Error;
116
117 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
118 if (i == nvec)
119 nvec++;
120
121 idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
122 idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
123 }
124
125 if (mask & PCIE_PORT_SERVICE_AER) {
126 int entry;
127
128 /*
129 * The code below follows Section 7.10.10 of the PCI Express
130 * Base Specification 2.0 stating that bits 31-27 of the Root
131 * Error Status Register contain a value indicating which of the
132 * MSI/MSI-X vectors assigned to the port is going to be used
133 * for AER, where "For MSI-X, the value in this register
134 * indicates which MSI-X Table entry is used to generate the
135 * interrupt message."
136 */
137 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
138 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
139 entry = reg32 >> 27;
140 if (entry >= nr_entries)
141 goto Error;
142
143 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
144 if (i == nvec)
145 nvec++;
146
147 idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
52 } 148 }
53 return quirk; 149
150 /*
151 * If nvec is equal to the allocated number of entries, we can just use
152 * what we have. Otherwise, the port has some extra entries not for the
153 * services we know and we need to work around that.
154 */
155 if (nvec == nr_entries) {
156 status = 0;
157 } else {
158 /* Drop the temporary MSI-X setup */
159 pci_disable_msix(dev);
160
161 /* Now allocate the MSI-X vectors for real */
162 status = pci_enable_msix(dev, msix_entries, nvec);
163 if (status)
164 goto Exit;
165 }
166
167 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
168 vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
169
170 Exit:
171 kfree(msix_entries);
172 return status;
173
174 Error:
175 pci_disable_msix(dev);
176 goto Exit;
54} 177}
55 178
56/** 179/**
@@ -64,47 +187,32 @@ static int is_msi_quirked(struct pci_dev *dev)
64 */ 187 */
65static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
66{ 189{
67 int i, pos, nvec, status = -EINVAL; 190 struct pcie_port_data *port_data = pci_get_drvdata(dev);
68 int interrupt_mode = PCIE_PORT_INTx_MODE; 191 int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
192 int i;
69 193
70 /* Set INTx as default */
71 for (i = 0, nvec = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
72 if (mask & (1 << i))
73 nvec++;
74 vectors[i] = dev->irq;
75 }
76
77 /* Check MSI quirk */ 194 /* Check MSI quirk */
78 if (is_msi_quirked(dev)) 195 if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
79 return interrupt_mode; 196 goto Fallback;
80 197
81 /* Select MSI-X over MSI if supported */ 198 /* Try to use MSI-X if supported */
82 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 199 if (!pcie_port_enable_msix(dev, vectors, mask))
83 if (pos) { 200 return PCIE_PORT_MSIX_MODE;
84 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 201
85 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 202 /* We're not going to use MSI-X, so try MSI and fall back to INTx */
86 status = pci_enable_msix(dev, msix_entries, nvec); 203 if (!pci_enable_msi(dev))
87 if (!status) { 204 interrupt_mode = PCIE_PORT_MSI_MODE;
88 int j = 0; 205
89 206 Fallback:
90 interrupt_mode = PCIE_PORT_MSIX_MODE; 207 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
91 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 208 interrupt_mode = PCIE_PORT_INTx_MODE;
92 if (mask & (1 << i)) 209
93 vectors[i] = msix_entries[j++].vector; 210 irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
94 } 211 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
95 } 212 vectors[i] = irq;
96 } 213
97 if (status) { 214 vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
98 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 215
99 if (pos) {
100 status = pci_enable_msi(dev);
101 if (!status) {
102 interrupt_mode = PCIE_PORT_MSI_MODE;
103 for (i = 0;i < PCIE_PORT_DEVICE_MAXSERVICES;i++)
104 vectors[i] = dev->irq;
105 }
106 }
107 }
108 return interrupt_mode; 216 return interrupt_mode;
109} 217}
110 218
@@ -132,13 +240,11 @@ static int get_port_device_capability(struct pci_dev *dev)
132 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32); 240 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
133 if (reg32 & SLOT_HP_CAPABLE_MASK) 241 if (reg32 & SLOT_HP_CAPABLE_MASK)
134 services |= PCIE_PORT_SERVICE_HP; 242 services |= PCIE_PORT_SERVICE_HP;
135 } 243 }
136 /* PME Capable - root port capability */ 244 /* AER capable */
137 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
138 services |= PCIE_PORT_SERVICE_PME;
139
140 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 245 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
141 services |= PCIE_PORT_SERVICE_AER; 246 services |= PCIE_PORT_SERVICE_AER;
247 /* VC support */
142 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 248 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
143 services |= PCIE_PORT_SERVICE_VC; 249 services |= PCIE_PORT_SERVICE_VC;
144 250
@@ -152,27 +258,24 @@ static int get_port_device_capability(struct pci_dev *dev)
152 * @port_type: Type of the port 258 * @port_type: Type of the port
153 * @service_type: Type of service to associate with the service device 259 * @service_type: Type of service to associate with the service device
154 * @irq: Interrupt vector to associate with the service device 260 * @irq: Interrupt vector to associate with the service device
155 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
156 */ 261 */
157static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 262static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
158 int port_type, int service_type, int irq, int irq_mode) 263 int service_type, int irq)
159{ 264{
265 struct pcie_port_data *port_data = pci_get_drvdata(parent);
160 struct device *device; 266 struct device *device;
267 int port_type = port_data->port_type;
161 268
162 dev->port = parent; 269 dev->port = parent;
163 dev->interrupt_mode = irq_mode;
164 dev->irq = irq; 270 dev->irq = irq;
165 dev->id.vendor = parent->vendor; 271 dev->service = service_type;
166 dev->id.device = parent->device;
167 dev->id.port_type = port_type;
168 dev->id.service_type = (1 << service_type);
169 272
170 /* Initialize generic device interface */ 273 /* Initialize generic device interface */
171 device = &dev->device; 274 device = &dev->device;
172 memset(device, 0, sizeof(struct device)); 275 memset(device, 0, sizeof(struct device));
173 device->bus = &pcie_port_bus_type; 276 device->bus = &pcie_port_bus_type;
174 device->driver = NULL; 277 device->driver = NULL;
175 device->driver_data = NULL; 278 dev_set_drvdata(device, NULL);
176 device->release = release_pcie_device; /* callback to free pcie dev */ 279 device->release = release_pcie_device; /* callback to free pcie dev */
177 dev_set_name(device, "%s:pcie%02x", 280 dev_set_name(device, "%s:pcie%02x",
178 pci_name(parent), get_descriptor_id(port_type, service_type)); 281 pci_name(parent), get_descriptor_id(port_type, service_type));
@@ -185,10 +288,9 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
185 * @port_type: Type of the port 288 * @port_type: Type of the port
186 * @service_type: Type of service to associate with the service device 289 * @service_type: Type of service to associate with the service device
187 * @irq: Interrupt vector to associate with the service device 290 * @irq: Interrupt vector to associate with the service device
188 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
189 */ 291 */
190static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, 292static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
191 int port_type, int service_type, int irq, int irq_mode) 293 int service_type, int irq)
192{ 294{
193 struct pcie_device *device; 295 struct pcie_device *device;
194 296
@@ -196,7 +298,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
196 if (!device) 298 if (!device)
197 return NULL; 299 return NULL;
198 300
199 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 301 pcie_device_init(parent, device, service_type, irq);
200 return device; 302 return device;
201} 303}
202 304
@@ -230,63 +332,90 @@ int pcie_port_device_probe(struct pci_dev *dev)
230 */ 332 */
231int pcie_port_device_register(struct pci_dev *dev) 333int pcie_port_device_register(struct pci_dev *dev)
232{ 334{
233 struct pcie_port_device_ext *p_ext; 335 struct pcie_port_data *port_data;
234 int status, type, capabilities, irq_mode, i; 336 int status, capabilities, irq_mode, i, nr_serv;
235 int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; 337 int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
236 u16 reg16; 338 u16 reg16;
237 339
238 /* Allocate port device extension */ 340 port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
239 if (!(p_ext = kmalloc(sizeof(struct pcie_port_device_ext), GFP_KERNEL))) 341 if (!port_data)
240 return -ENOMEM; 342 return -ENOMEM;
241 343 pci_set_drvdata(dev, port_data);
242 pci_set_drvdata(dev, p_ext);
243 344
244 /* Get port type */ 345 /* Get port type */
245 pci_read_config_word(dev, 346 pci_read_config_word(dev,
246 pci_find_capability(dev, PCI_CAP_ID_EXP) + 347 pci_find_capability(dev, PCI_CAP_ID_EXP) +
247 PCIE_CAPABILITIES_REG, &reg16); 348 PCIE_CAPABILITIES_REG, &reg16);
248 type = (reg16 >> 4) & PORT_TYPE_MASK; 349 port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
249 350
250 /* Now get port services */
251 capabilities = get_port_device_capability(dev); 351 capabilities = get_port_device_capability(dev);
352 /* Root ports are capable of generating PME too */
353 if (port_data->port_type == PCIE_RC_PORT)
354 capabilities |= PCIE_PORT_SERVICE_PME;
355
252 irq_mode = assign_interrupt_mode(dev, vectors, capabilities); 356 irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
253 p_ext->interrupt_mode = irq_mode; 357 if (irq_mode == PCIE_PORT_NO_IRQ) {
358 /*
359 * Don't use service devices that require interrupts if there is
360 * no way to generate them.
361 */
362 if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
363 status = -ENODEV;
364 goto Error;
365 }
366 capabilities = PCIE_PORT_SERVICE_VC;
367 }
368 port_data->port_irq_mode = irq_mode;
369
370 status = pci_enable_device(dev);
371 if (status)
372 goto Error;
373 pci_set_master(dev);
254 374
255 /* Allocate child services if any */ 375 /* Allocate child services if any */
256 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 376 for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
257 struct pcie_device *child; 377 struct pcie_device *child;
378 int service = 1 << i;
379
380 if (!(capabilities & service))
381 continue;
258 382
259 if (capabilities & (1 << i)) { 383 child = alloc_pcie_device(dev, service, vectors[i]);
260 child = alloc_pcie_device( 384 if (!child)
261 dev, /* parent */ 385 continue;
262 type, /* port type */ 386
263 i, /* service type */ 387 status = device_register(&child->device);
264 vectors[i], /* irq */ 388 if (status) {
265 irq_mode /* interrupt mode */); 389 kfree(child);
266 if (child) { 390 continue;
267 status = device_register(&child->device);
268 if (status) {
269 kfree(child);
270 continue;
271 }
272 get_device(&child->device);
273 }
274 } 391 }
392
393 get_device(&child->device);
394 nr_serv++;
395 }
396 if (!nr_serv) {
397 pci_disable_device(dev);
398 status = -ENODEV;
399 goto Error;
275 } 400 }
401
276 return 0; 402 return 0;
403
404 Error:
405 kfree(port_data);
406 return status;
277} 407}
278 408
279#ifdef CONFIG_PM 409#ifdef CONFIG_PM
280static int suspend_iter(struct device *dev, void *data) 410static int suspend_iter(struct device *dev, void *data)
281{ 411{
282 struct pcie_port_service_driver *service_driver; 412 struct pcie_port_service_driver *service_driver;
283 pm_message_t state = * (pm_message_t *) data;
284 413
285 if ((dev->bus == &pcie_port_bus_type) && 414 if ((dev->bus == &pcie_port_bus_type) &&
286 (dev->driver)) { 415 (dev->driver)) {
287 service_driver = to_service_driver(dev->driver); 416 service_driver = to_service_driver(dev->driver);
288 if (service_driver->suspend) 417 if (service_driver->suspend)
289 service_driver->suspend(to_pcie_device(dev), state); 418 service_driver->suspend(to_pcie_device(dev));
290 } 419 }
291 return 0; 420 return 0;
292} 421}
@@ -294,11 +423,10 @@ static int suspend_iter(struct device *dev, void *data)
294/** 423/**
295 * pcie_port_device_suspend - suspend port services associated with a PCIe port 424 * pcie_port_device_suspend - suspend port services associated with a PCIe port
296 * @dev: PCI Express port to handle 425 * @dev: PCI Express port to handle
297 * @state: Representation of system power management transition in progress
298 */ 426 */
299int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) 427int pcie_port_device_suspend(struct device *dev)
300{ 428{
301 return device_for_each_child(&dev->dev, &state, suspend_iter); 429 return device_for_each_child(dev, NULL, suspend_iter);
302} 430}
303 431
304static int resume_iter(struct device *dev, void *data) 432static int resume_iter(struct device *dev, void *data)
@@ -318,24 +446,17 @@ static int resume_iter(struct device *dev, void *data)
318 * pcie_port_device_suspend - resume port services associated with a PCIe port 446 * pcie_port_device_suspend - resume port services associated with a PCIe port
319 * @dev: PCI Express port to handle 447 * @dev: PCI Express port to handle
320 */ 448 */
321int pcie_port_device_resume(struct pci_dev *dev) 449int pcie_port_device_resume(struct device *dev)
322{ 450{
323 return device_for_each_child(&dev->dev, NULL, resume_iter); 451 return device_for_each_child(dev, NULL, resume_iter);
324} 452}
325#endif 453#endif /* PM */
326 454
327static int remove_iter(struct device *dev, void *data) 455static int remove_iter(struct device *dev, void *data)
328{ 456{
329 struct pcie_port_service_driver *service_driver;
330
331 if (dev->bus == &pcie_port_bus_type) { 457 if (dev->bus == &pcie_port_bus_type) {
332 if (dev->driver) { 458 put_device(dev);
333 service_driver = to_service_driver(dev->driver); 459 device_unregister(dev);
334 if (service_driver->remove)
335 service_driver->remove(to_pcie_device(dev));
336 }
337 *(unsigned long*)data = (unsigned long)dev;
338 return 1;
339 } 460 }
340 return 0; 461 return 0;
341} 462}
@@ -349,25 +470,21 @@ static int remove_iter(struct device *dev, void *data)
349 */ 470 */
350void pcie_port_device_remove(struct pci_dev *dev) 471void pcie_port_device_remove(struct pci_dev *dev)
351{ 472{
352 struct device *device; 473 struct pcie_port_data *port_data = pci_get_drvdata(dev);
353 unsigned long device_addr;
354 int interrupt_mode = PCIE_PORT_INTx_MODE;
355 int status;
356 474
357 do { 475 device_for_each_child(&dev->dev, NULL, remove_iter);
358 status = device_for_each_child(&dev->dev, &device_addr, remove_iter); 476 pci_disable_device(dev);
359 if (status) { 477
360 device = (struct device*)device_addr; 478 switch (port_data->port_irq_mode) {
361 interrupt_mode = (to_pcie_device(device))->interrupt_mode; 479 case PCIE_PORT_MSIX_MODE:
362 put_device(device);
363 device_unregister(device);
364 }
365 } while (status);
366 /* Switch to INTx by default if MSI enabled */
367 if (interrupt_mode == PCIE_PORT_MSIX_MODE)
368 pci_disable_msix(dev); 480 pci_disable_msix(dev);
369 else if (interrupt_mode == PCIE_PORT_MSI_MODE) 481 break;
482 case PCIE_PORT_MSI_MODE:
370 pci_disable_msi(dev); 483 pci_disable_msi(dev);
484 break;
485 }
486
487 kfree(port_data);
371} 488}
372 489
373/** 490/**
@@ -392,7 +509,7 @@ static int pcie_port_probe_service(struct device *dev)
392 return -ENODEV; 509 return -ENODEV;
393 510
394 pciedev = to_pcie_device(dev); 511 pciedev = to_pcie_device(dev);
395 status = driver->probe(pciedev, driver->id_table); 512 status = driver->probe(pciedev);
396 if (!status) { 513 if (!status) {
397 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 514 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
398 driver->name); 515 driver->name);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 5ea566e20b37..091ce70051e0 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -32,11 +32,6 @@ MODULE_LICENSE("GPL");
32/* global data */ 32/* global data */
33static const char device_name[] = "pcieport-driver"; 33static const char device_name[] = "pcieport-driver";
34 34
35static int pcie_portdrv_save_config(struct pci_dev *dev)
36{
37 return pci_save_state(dev);
38}
39
40static int pcie_portdrv_restore_config(struct pci_dev *dev) 35static int pcie_portdrv_restore_config(struct pci_dev *dev)
41{ 36{
42 int retval; 37 int retval;
@@ -49,21 +44,21 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
49} 44}
50 45
51#ifdef CONFIG_PM 46#ifdef CONFIG_PM
52static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) 47static struct dev_pm_ops pcie_portdrv_pm_ops = {
53{ 48 .suspend = pcie_port_device_suspend,
54 return pcie_port_device_suspend(dev, state); 49 .resume = pcie_port_device_resume,
50 .freeze = pcie_port_device_suspend,
51 .thaw = pcie_port_device_resume,
52 .poweroff = pcie_port_device_suspend,
53 .restore = pcie_port_device_resume,
54};
55 55
56} 56#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
57 57
58static int pcie_portdrv_resume(struct pci_dev *dev) 58#else /* !PM */
59{ 59
60 pci_set_master(dev); 60#define PCIE_PORTDRV_PM_OPS NULL
61 return pcie_port_device_resume(dev); 61#endif /* !PM */
62}
63#else
64#define pcie_portdrv_suspend NULL
65#define pcie_portdrv_resume NULL
66#endif
67 62
68/* 63/*
69 * pcie_portdrv_probe - Probe PCI-Express port devices 64 * pcie_portdrv_probe - Probe PCI-Express port devices
@@ -82,20 +77,15 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
82 if (status) 77 if (status)
83 return status; 78 return status;
84 79
85 if (pci_enable_device(dev) < 0)
86 return -ENODEV;
87
88 pci_set_master(dev);
89 if (!dev->irq && dev->pin) { 80 if (!dev->irq && dev->pin) {
90 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 81 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
91 "check vendor BIOS\n", dev->vendor, dev->device); 82 "check vendor BIOS\n", dev->vendor, dev->device);
92 } 83 }
93 if (pcie_port_device_register(dev)) { 84 status = pcie_port_device_register(dev);
94 pci_disable_device(dev); 85 if (status)
95 return -ENOMEM; 86 return status;
96 }
97 87
98 pcie_portdrv_save_config(dev); 88 pci_save_state(dev);
99 89
100 return 0; 90 return 0;
101} 91}
@@ -104,7 +94,6 @@ static void pcie_portdrv_remove (struct pci_dev *dev)
104{ 94{
105 pcie_port_device_remove(dev); 95 pcie_port_device_remove(dev);
106 pci_disable_device(dev); 96 pci_disable_device(dev);
107 kfree(pci_get_drvdata(dev));
108} 97}
109 98
110static int error_detected_iter(struct device *device, void *data) 99static int error_detected_iter(struct device *device, void *data)
@@ -211,7 +200,7 @@ static int slot_reset_iter(struct device *device, void *data)
211 200
212static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) 201static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
213{ 202{
214 pci_ers_result_t status = PCI_ERS_RESULT_NONE; 203 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
215 int retval; 204 int retval;
216 205
217 /* If fatal, restore cfg space for possible link reset at upstream */ 206 /* If fatal, restore cfg space for possible link reset at upstream */
@@ -278,10 +267,9 @@ static struct pci_driver pcie_portdriver = {
278 .probe = pcie_portdrv_probe, 267 .probe = pcie_portdrv_probe,
279 .remove = pcie_portdrv_remove, 268 .remove = pcie_portdrv_remove,
280 269
281 .suspend = pcie_portdrv_suspend,
282 .resume = pcie_portdrv_resume,
283
284 .err_handler = &pcie_portdrv_err_handler, 270 .err_handler = &pcie_portdrv_err_handler,
271
272 .driver.pm = PCIE_PORTDRV_PM_OPS,
285}; 273};
286 274
287static int __init pcie_portdrv_init(void) 275static int __init pcie_portdrv_init(void)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 55ec44a27e89..40e75f6a5056 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
193 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; 193 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
194 if (type == pci_bar_io) { 194 if (type == pci_bar_io) {
195 l &= PCI_BASE_ADDRESS_IO_MASK; 195 l &= PCI_BASE_ADDRESS_IO_MASK;
196 mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff; 196 mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
197 } else { 197 } else {
198 l &= PCI_BASE_ADDRESS_MEM_MASK; 198 l &= PCI_BASE_ADDRESS_MEM_MASK;
199 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 199 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
237 dev_printk(KERN_DEBUG, &dev->dev, 237 dev_printk(KERN_DEBUG, &dev->dev,
238 "reg %x 64bit mmio: %pR\n", pos, res); 238 "reg %x 64bit mmio: %pR\n", pos, res);
239 } 239 }
240
241 res->flags |= IORESOURCE_MEM_64;
240 } else { 242 } else {
241 sz = pci_size(l, sz, mask); 243 sz = pci_size(l, sz, mask);
242 244
@@ -287,7 +289,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
287 struct resource *res; 289 struct resource *res;
288 int i; 290 int i;
289 291
290 if (!dev) /* It's a host bus, nothing to read */ 292 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
291 return; 293 return;
292 294
293 if (dev->transparent) { 295 if (dev->transparent) {
@@ -362,7 +364,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
362 } 364 }
363 } 365 }
364 if (base <= limit) { 366 if (base <= limit) {
365 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; 367 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
368 IORESOURCE_MEM | IORESOURCE_PREFETCH;
369 if (res->flags & PCI_PREF_RANGE_TYPE_64)
370 res->flags |= IORESOURCE_MEM_64;
366 res->start = base; 371 res->start = base;
367 res->end = limit + 0xfffff; 372 res->end = limit + 0xfffff;
368 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", 373 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
@@ -511,21 +516,21 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
511 516
512 /* 517 /*
513 * If we already got to this bus through a different bridge, 518 * If we already got to this bus through a different bridge,
514 * ignore it. This can happen with the i450NX chipset. 519 * don't re-add it. This can happen with the i450NX chipset.
520 *
521 * However, we continue to descend down the hierarchy and
522 * scan remaining child buses.
515 */ 523 */
516 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 524 child = pci_find_bus(pci_domain_nr(bus), busnr);
517 dev_info(&dev->dev, "bus %04x:%02x already known\n", 525 if (!child) {
518 pci_domain_nr(bus), busnr); 526 child = pci_add_new_bus(bus, dev, busnr);
519 goto out; 527 if (!child)
528 goto out;
529 child->primary = buses & 0xFF;
530 child->subordinate = (buses >> 16) & 0xFF;
531 child->bridge_ctl = bctl;
520 } 532 }
521 533
522 child = pci_add_new_bus(bus, dev, busnr);
523 if (!child)
524 goto out;
525 child->primary = buses & 0xFF;
526 child->subordinate = (buses >> 16) & 0xFF;
527 child->bridge_ctl = bctl;
528
529 cmax = pci_scan_child_bus(child); 534 cmax = pci_scan_child_bus(child);
530 if (cmax > max) 535 if (cmax > max)
531 max = cmax; 536 max = cmax;
@@ -674,6 +679,19 @@ static void pci_read_irq(struct pci_dev *dev)
674 dev->irq = irq; 679 dev->irq = irq;
675} 680}
676 681
682static void set_pcie_port_type(struct pci_dev *pdev)
683{
684 int pos;
685 u16 reg16;
686
687 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
688 if (!pos)
689 return;
690 pdev->is_pcie = 1;
691 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
692 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
693}
694
677#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 695#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
678 696
679/** 697/**
@@ -683,12 +701,33 @@ static void pci_read_irq(struct pci_dev *dev)
683 * Initialize the device structure with information about the device's 701 * Initialize the device structure with information about the device's
684 * vendor,class,memory and IO-space addresses,IRQ lines etc. 702 * vendor,class,memory and IO-space addresses,IRQ lines etc.
685 * Called at initialisation of the PCI subsystem and by CardBus services. 703 * Called at initialisation of the PCI subsystem and by CardBus services.
686 * Returns 0 on success and -1 if unknown type of device (not normal, bridge 704 * Returns 0 on success and negative if unknown type of device (not normal,
687 * or CardBus). 705 * bridge or CardBus).
688 */ 706 */
689static int pci_setup_device(struct pci_dev * dev) 707int pci_setup_device(struct pci_dev *dev)
690{ 708{
691 u32 class; 709 u32 class;
710 u8 hdr_type;
711 struct pci_slot *slot;
712
713 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
714 return -EIO;
715
716 dev->sysdata = dev->bus->sysdata;
717 dev->dev.parent = dev->bus->bridge;
718 dev->dev.bus = &pci_bus_type;
719 dev->hdr_type = hdr_type & 0x7f;
720 dev->multifunction = !!(hdr_type & 0x80);
721 dev->error_state = pci_channel_io_normal;
722 set_pcie_port_type(dev);
723
724 list_for_each_entry(slot, &dev->bus->slots, list)
725 if (PCI_SLOT(dev->devfn) == slot->number)
726 dev->slot = slot;
727
728 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
729 set this higher, assuming the system even supports it. */
730 dev->dma_mask = 0xffffffff;
692 731
693 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 732 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
694 dev->bus->number, PCI_SLOT(dev->devfn), 733 dev->bus->number, PCI_SLOT(dev->devfn),
@@ -703,11 +742,15 @@ static int pci_setup_device(struct pci_dev * dev)
703 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", 742 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
704 dev->vendor, dev->device, class, dev->hdr_type); 743 dev->vendor, dev->device, class, dev->hdr_type);
705 744
745 /* need to have dev->class ready */
746 dev->cfg_size = pci_cfg_space_size(dev);
747
706 /* "Unknown power state" */ 748 /* "Unknown power state" */
707 dev->current_state = PCI_UNKNOWN; 749 dev->current_state = PCI_UNKNOWN;
708 750
709 /* Early fixups, before probing the BARs */ 751 /* Early fixups, before probing the BARs */
710 pci_fixup_device(pci_fixup_early, dev); 752 pci_fixup_device(pci_fixup_early, dev);
753 /* device class may be changed after fixup */
711 class = dev->class >> 8; 754 class = dev->class >> 8;
712 755
713 switch (dev->hdr_type) { /* header type */ 756 switch (dev->hdr_type) { /* header type */
@@ -770,7 +813,7 @@ static int pci_setup_device(struct pci_dev * dev)
770 default: /* unknown header */ 813 default: /* unknown header */
771 dev_err(&dev->dev, "unknown header type %02x, " 814 dev_err(&dev->dev, "unknown header type %02x, "
772 "ignoring device\n", dev->hdr_type); 815 "ignoring device\n", dev->hdr_type);
773 return -1; 816 return -EIO;
774 817
775 bad: 818 bad:
776 dev_err(&dev->dev, "ignoring class %02x (doesn't match header " 819 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
@@ -785,6 +828,7 @@ static int pci_setup_device(struct pci_dev * dev)
785static void pci_release_capabilities(struct pci_dev *dev) 828static void pci_release_capabilities(struct pci_dev *dev)
786{ 829{
787 pci_vpd_release(dev); 830 pci_vpd_release(dev);
831 pci_iov_release(dev);
788} 832}
789 833
790/** 834/**
@@ -803,19 +847,6 @@ static void pci_release_dev(struct device *dev)
803 kfree(pci_dev); 847 kfree(pci_dev);
804} 848}
805 849
806static void set_pcie_port_type(struct pci_dev *pdev)
807{
808 int pos;
809 u16 reg16;
810
811 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
812 if (!pos)
813 return;
814 pdev->is_pcie = 1;
815 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
816 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
817}
818
819/** 850/**
820 * pci_cfg_space_size - get the configuration space size of the PCI device. 851 * pci_cfg_space_size - get the configuration space size of the PCI device.
821 * @dev: PCI device 852 * @dev: PCI device
@@ -847,6 +878,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
847{ 878{
848 int pos; 879 int pos;
849 u32 status; 880 u32 status;
881 u16 class;
882
883 class = dev->class >> 8;
884 if (class == PCI_CLASS_BRIDGE_HOST)
885 return pci_cfg_space_size_ext(dev);
850 886
851 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 887 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
852 if (!pos) { 888 if (!pos) {
@@ -891,9 +927,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
891static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 927static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
892{ 928{
893 struct pci_dev *dev; 929 struct pci_dev *dev;
894 struct pci_slot *slot;
895 u32 l; 930 u32 l;
896 u8 hdr_type;
897 int delay = 1; 931 int delay = 1;
898 932
899 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) 933 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
@@ -920,34 +954,16 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
920 } 954 }
921 } 955 }
922 956
923 if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
924 return NULL;
925
926 dev = alloc_pci_dev(); 957 dev = alloc_pci_dev();
927 if (!dev) 958 if (!dev)
928 return NULL; 959 return NULL;
929 960
930 dev->bus = bus; 961 dev->bus = bus;
931 dev->sysdata = bus->sysdata;
932 dev->dev.parent = bus->bridge;
933 dev->dev.bus = &pci_bus_type;
934 dev->devfn = devfn; 962 dev->devfn = devfn;
935 dev->hdr_type = hdr_type & 0x7f;
936 dev->multifunction = !!(hdr_type & 0x80);
937 dev->vendor = l & 0xffff; 963 dev->vendor = l & 0xffff;
938 dev->device = (l >> 16) & 0xffff; 964 dev->device = (l >> 16) & 0xffff;
939 dev->cfg_size = pci_cfg_space_size(dev);
940 dev->error_state = pci_channel_io_normal;
941 set_pcie_port_type(dev);
942
943 list_for_each_entry(slot, &bus->slots, list)
944 if (PCI_SLOT(devfn) == slot->number)
945 dev->slot = slot;
946 965
947 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 966 if (pci_setup_device(dev)) {
948 set this higher, assuming the system even supports it. */
949 dev->dma_mask = 0xffffffff;
950 if (pci_setup_device(dev) < 0) {
951 kfree(dev); 967 kfree(dev);
952 return NULL; 968 return NULL;
953 } 969 }
@@ -972,6 +988,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
972 988
973 /* Alternative Routing-ID Forwarding */ 989 /* Alternative Routing-ID Forwarding */
974 pci_enable_ari(dev); 990 pci_enable_ari(dev);
991
992 /* Single Root I/O Virtualization */
993 pci_iov_init(dev);
975} 994}
976 995
977void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 996void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1006,6 +1025,12 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1006{ 1025{
1007 struct pci_dev *dev; 1026 struct pci_dev *dev;
1008 1027
1028 dev = pci_get_slot(bus, devfn);
1029 if (dev) {
1030 pci_dev_put(dev);
1031 return dev;
1032 }
1033
1009 dev = pci_scan_device(bus, devfn); 1034 dev = pci_scan_device(bus, devfn);
1010 if (!dev) 1035 if (!dev)
1011 return NULL; 1036 return NULL;
@@ -1024,35 +1049,27 @@ EXPORT_SYMBOL(pci_scan_single_device);
1024 * Scan a PCI slot on the specified PCI bus for devices, adding 1049 * Scan a PCI slot on the specified PCI bus for devices, adding
1025 * discovered devices to the @bus->devices list. New devices 1050 * discovered devices to the @bus->devices list. New devices
1026 * will not have is_added set. 1051 * will not have is_added set.
1052 *
1053 * Returns the number of new devices found.
1027 */ 1054 */
1028int pci_scan_slot(struct pci_bus *bus, int devfn) 1055int pci_scan_slot(struct pci_bus *bus, int devfn)
1029{ 1056{
1030 int func, nr = 0; 1057 int fn, nr = 0;
1031 int scan_all_fns; 1058 struct pci_dev *dev;
1032
1033 scan_all_fns = pcibios_scan_all_fns(bus, devfn);
1034
1035 for (func = 0; func < 8; func++, devfn++) {
1036 struct pci_dev *dev;
1037
1038 dev = pci_scan_single_device(bus, devfn);
1039 if (dev) {
1040 nr++;
1041 1059
1042 /* 1060 dev = pci_scan_single_device(bus, devfn);
1043 * If this is a single function device, 1061 if (dev && !dev->is_added) /* new device? */
1044 * don't scan past the first function. 1062 nr++;
1045 */ 1063
1046 if (!dev->multifunction) { 1064 if ((dev && dev->multifunction) ||
1047 if (func > 0) { 1065 (!dev && pcibios_scan_all_fns(bus, devfn))) {
1048 dev->multifunction = 1; 1066 for (fn = 1; fn < 8; fn++) {
1049 } else { 1067 dev = pci_scan_single_device(bus, devfn + fn);
1050 break; 1068 if (dev) {
1051 } 1069 if (!dev->is_added)
1070 nr++;
1071 dev->multifunction = 1;
1052 } 1072 }
1053 } else {
1054 if (func == 0 && !scan_all_fns)
1055 break;
1056 } 1073 }
1057 } 1074 }
1058 1075
@@ -1074,12 +1091,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1074 for (devfn = 0; devfn < 0x100; devfn += 8) 1091 for (devfn = 0; devfn < 0x100; devfn += 8)
1075 pci_scan_slot(bus, devfn); 1092 pci_scan_slot(bus, devfn);
1076 1093
1094 /* Reserve buses for SR-IOV capability. */
1095 max += pci_iov_bus_range(bus);
1096
1077 /* 1097 /*
1078 * After performing arch-dependent fixup of the bus, look behind 1098 * After performing arch-dependent fixup of the bus, look behind
1079 * all PCI-to-PCI bridges on this bus. 1099 * all PCI-to-PCI bridges on this bus.
1080 */ 1100 */
1081 pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number); 1101 if (!bus->is_added) {
1082 pcibios_fixup_bus(bus); 1102 pr_debug("PCI: Fixups for bus %04x:%02x\n",
1103 pci_domain_nr(bus), bus->number);
1104 pcibios_fixup_bus(bus);
1105 if (pci_is_root_bus(bus))
1106 bus->is_added = 1;
1107 }
1108
1083 for (pass=0; pass < 2; pass++) 1109 for (pass=0; pass < 2; pass++)
1084 list_for_each_entry(dev, &bus->devices, bus_list) { 1110 list_for_each_entry(dev, &bus->devices, bus_list) {
1085 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 1111 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
@@ -1099,10 +1125,6 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1099 return max; 1125 return max;
1100} 1126}
1101 1127
1102void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
1103{
1104}
1105
1106struct pci_bus * pci_create_bus(struct device *parent, 1128struct pci_bus * pci_create_bus(struct device *parent,
1107 int bus, struct pci_ops *ops, void *sysdata) 1129 int bus, struct pci_ops *ops, void *sysdata)
1108{ 1130{
@@ -1114,7 +1136,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
1114 if (!b) 1136 if (!b)
1115 return NULL; 1137 return NULL;
1116 1138
1117 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 1139 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1118 if (!dev){ 1140 if (!dev){
1119 kfree(b); 1141 kfree(b);
1120 return NULL; 1142 return NULL;
@@ -1133,7 +1155,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
1133 list_add_tail(&b->node, &pci_root_buses); 1155 list_add_tail(&b->node, &pci_root_buses);
1134 up_write(&pci_bus_sem); 1156 up_write(&pci_bus_sem);
1135 1157
1136 memset(dev, 0, sizeof(*dev));
1137 dev->parent = parent; 1158 dev->parent = parent;
1138 dev->release = pci_release_bus_bridge_dev; 1159 dev->release = pci_release_bus_bridge_dev;
1139 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); 1160 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
@@ -1162,8 +1183,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
1162 b->resource[0] = &ioport_resource; 1183 b->resource[0] = &ioport_resource;
1163 b->resource[1] = &iomem_resource; 1184 b->resource[1] = &iomem_resource;
1164 1185
1165 set_pci_bus_resources_arch_default(b);
1166
1167 return b; 1186 return b;
1168 1187
1169dev_create_file_err: 1188dev_create_file_err:
@@ -1193,6 +1212,38 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1193EXPORT_SYMBOL(pci_scan_bus_parented); 1212EXPORT_SYMBOL(pci_scan_bus_parented);
1194 1213
1195#ifdef CONFIG_HOTPLUG 1214#ifdef CONFIG_HOTPLUG
1215/**
1216 * pci_rescan_bus - scan a PCI bus for devices.
1217 * @bus: PCI bus to scan
1218 *
1219 * Scan a PCI bus and child buses for new devices, adds them,
1220 * and enables them.
1221 *
1222 * Returns the max number of subordinate bus discovered.
1223 */
1224unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1225{
1226 unsigned int max;
1227 struct pci_dev *dev;
1228
1229 max = pci_scan_child_bus(bus);
1230
1231 down_read(&pci_bus_sem);
1232 list_for_each_entry(dev, &bus->devices, bus_list)
1233 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1234 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1235 if (dev->subordinate)
1236 pci_bus_size_bridges(dev->subordinate);
1237 up_read(&pci_bus_sem);
1238
1239 pci_bus_assign_resources(bus);
1240 pci_enable_bridges(bus);
1241 pci_bus_add_devices(bus);
1242
1243 return max;
1244}
1245EXPORT_SYMBOL_GPL(pci_rescan_bus);
1246
1196EXPORT_SYMBOL(pci_add_new_bus); 1247EXPORT_SYMBOL(pci_add_new_bus);
1197EXPORT_SYMBOL(pci_scan_slot); 1248EXPORT_SYMBOL(pci_scan_slot);
1198EXPORT_SYMBOL(pci_scan_bridge); 1249EXPORT_SYMBOL(pci_scan_bridge);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 92b9efe9bcaf..56552d74abea 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/dmi.h> 25#include <linux/dmi.h>
26#include <linux/pci-aspm.h> 26#include <linux/pci-aspm.h>
27#include <linux/ioport.h>
27#include "pci.h" 28#include "pci.h"
28 29
29int isa_dma_bridge_buggy; 30int isa_dma_bridge_buggy;
@@ -34,6 +35,69 @@ int pcie_mch_quirk;
34EXPORT_SYMBOL(pcie_mch_quirk); 35EXPORT_SYMBOL(pcie_mch_quirk);
35 36
36#ifdef CONFIG_PCI_QUIRKS 37#ifdef CONFIG_PCI_QUIRKS
38/*
39 * This quirk function disables memory decoding and releases memory resources
40 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
41 * It also rounds up size to specified alignment.
42 * Later on, the kernel will assign page-aligned memory resource back
43 * to the device.
44 */
45static void __devinit quirk_resource_alignment(struct pci_dev *dev)
46{
47 int i;
48 struct resource *r;
49 resource_size_t align, size;
50 u16 command;
51
52 if (!pci_is_reassigndev(dev))
53 return;
54
55 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
56 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
57 dev_warn(&dev->dev,
58 "Can't reassign resources to host bridge.\n");
59 return;
60 }
61
62 dev_info(&dev->dev,
63 "Disabling memory decoding and releasing memory resources.\n");
64 pci_read_config_word(dev, PCI_COMMAND, &command);
65 command &= ~PCI_COMMAND_MEMORY;
66 pci_write_config_word(dev, PCI_COMMAND, command);
67
68 align = pci_specified_resource_alignment(dev);
69 for (i=0; i < PCI_BRIDGE_RESOURCES; i++) {
70 r = &dev->resource[i];
71 if (!(r->flags & IORESOURCE_MEM))
72 continue;
73 size = resource_size(r);
74 if (size < align) {
75 size = align;
76 dev_info(&dev->dev,
77 "Rounding up size of resource #%d to %#llx.\n",
78 i, (unsigned long long)size);
79 }
80 r->end = size - 1;
81 r->start = 0;
82 }
83 /* Need to disable bridge's resource window,
84 * to enable the kernel to reassign new resource
85 * window later on.
86 */
87 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
88 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
89 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
90 r = &dev->resource[i];
91 if (!(r->flags & IORESOURCE_MEM))
92 continue;
93 r->end = resource_size(r) - 1;
94 r->start = 0;
95 }
96 pci_disable_bridge_window(dev);
97 }
98}
99DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
100
37/* The Mellanox Tavor device gives false positive parity errors 101/* The Mellanox Tavor device gives false positive parity errors
38 * Mark this device with a broken_parity_status, to allow 102 * Mark this device with a broken_parity_status, to allow
39 * PCI scanning code to "skip" this now blacklisted device. 103 * PCI scanning code to "skip" this now blacklisted device.
@@ -1069,6 +1133,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1069 switch (dev->subsystem_device) { 1133 switch (dev->subsystem_device) {
1070 case 0x1751: /* M2N notebook */ 1134 case 0x1751: /* M2N notebook */
1071 case 0x1821: /* M5N notebook */ 1135 case 0x1821: /* M5N notebook */
1136 case 0x1897: /* A6L notebook */
1072 asus_hides_smbus = 1; 1137 asus_hides_smbus = 1;
1073 } 1138 }
1074 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1139 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1099,6 +1164,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1099 switch (dev->subsystem_device) { 1164 switch (dev->subsystem_device) {
1100 case 0x12bc: /* HP D330L */ 1165 case 0x12bc: /* HP D330L */
1101 case 0x12bd: /* HP D530 */ 1166 case 0x12bd: /* HP D530 */
1167 case 0x006a: /* HP Compaq nx9500 */
1102 asus_hides_smbus = 1; 1168 asus_hides_smbus = 1;
1103 } 1169 }
1104 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB) 1170 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -1126,10 +1192,15 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1126 * its on-board VGA controller */ 1192 * its on-board VGA controller */
1127 asus_hides_smbus = 1; 1193 asus_hides_smbus = 1;
1128 } 1194 }
1129 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG) 1195 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1130 switch(dev->subsystem_device) { 1196 switch(dev->subsystem_device) {
1131 case 0x00b8: /* Compaq Evo D510 CMT */ 1197 case 0x00b8: /* Compaq Evo D510 CMT */
1132 case 0x00b9: /* Compaq Evo D510 SFF */ 1198 case 0x00b9: /* Compaq Evo D510 SFF */
1199 /* Motherboard doesn't have Host bridge
1200 * subvendor/subdevice IDs and on-board VGA
1201 * controller is disabled if an AGP card is
1202 * inserted, therefore checking USB UHCI
1203 * Controller #1 */
1133 asus_hides_smbus = 1; 1204 asus_hides_smbus = 1;
1134 } 1205 }
1135 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) 1206 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
@@ -1154,7 +1225,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
1154DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); 1225DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1155 1226
1156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); 1227DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1157DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge); 1228DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
1158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); 1229DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1159 1230
1160static void asus_hides_smbus_lpc(struct pci_dev *dev) 1231static void asus_hides_smbus_lpc(struct pci_dev *dev)
@@ -1664,9 +1735,13 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1664 * of parallel ports and <S> is the number of serial ports. 1735 * of parallel ports and <S> is the number of serial ports.
1665 */ 1736 */
1666 switch (dev->device) { 1737 switch (dev->device) {
1738 case PCI_DEVICE_ID_NETMOS_9835:
1739 /* Well, this rule doesn't hold for the following 9835 device */
1740 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
1741 dev->subsystem_device == 0x0299)
1742 return;
1667 case PCI_DEVICE_ID_NETMOS_9735: 1743 case PCI_DEVICE_ID_NETMOS_9735:
1668 case PCI_DEVICE_ID_NETMOS_9745: 1744 case PCI_DEVICE_ID_NETMOS_9745:
1669 case PCI_DEVICE_ID_NETMOS_9835:
1670 case PCI_DEVICE_ID_NETMOS_9845: 1745 case PCI_DEVICE_ID_NETMOS_9845:
1671 case PCI_DEVICE_ID_NETMOS_9855: 1746 case PCI_DEVICE_ID_NETMOS_9855:
1672 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && 1747 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
@@ -1943,6 +2018,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1943 PCI_DEVICE_ID_NX2_5709S, 2018 PCI_DEVICE_ID_NX2_5709S,
1944 quirk_brcm_570x_limit_vpd); 2019 quirk_brcm_570x_limit_vpd);
1945 2020
2021/* Originally in EDAC sources for i82875P:
2022 * Intel tells BIOS developers to hide device 6 which
2023 * configures the overflow device access containing
2024 * the DRBs - this is where we expose device 6.
2025 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
2026 */
2027static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
2028{
2029 u8 reg;
2030
2031 if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
2032 dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
2033 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2034 }
2035}
2036
2037DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2038 quirk_unhide_mch_dev6);
2039DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2040 quirk_unhide_mch_dev6);
2041
2042
1946#ifdef CONFIG_PCI_MSI 2043#ifdef CONFIG_PCI_MSI
1947/* Some chipsets do not support MSI. We cannot easily rely on setting 2044/* Some chipsets do not support MSI. We cannot easily rely on setting
1948 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually 2045 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
@@ -1960,6 +2057,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_di
1960DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); 2057DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
1961DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); 2058DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
1962DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); 2059DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
2060DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
1963 2061
1964/* Disable MSI on chipsets that are known to not support it */ 2062/* Disable MSI on chipsets that are known to not support it */
1965static void __devinit quirk_disable_msi(struct pci_dev *dev) 2063static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -2078,6 +2176,92 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2078 PCI_DEVICE_ID_NVIDIA_NVENET_15, 2176 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2079 nvenet_msi_disable); 2177 nvenet_msi_disable);
2080 2178
2179static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
2180{
2181 int pos, ttl = 48;
2182 int found = 0;
2183
2184 /* check if there is HT MSI cap or enabled on this device */
2185 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2186 while (pos && ttl--) {
2187 u8 flags;
2188
2189 if (found < 1)
2190 found = 1;
2191 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2192 &flags) == 0) {
2193 if (flags & HT_MSI_FLAGS_ENABLE) {
2194 if (found < 2) {
2195 found = 2;
2196 break;
2197 }
2198 }
2199 }
2200 pos = pci_find_next_ht_capability(dev, pos,
2201 HT_CAPTYPE_MSI_MAPPING);
2202 }
2203
2204 return found;
2205}
2206
2207static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
2208{
2209 struct pci_dev *dev;
2210 int pos;
2211 int i, dev_no;
2212 int found = 0;
2213
2214 dev_no = host_bridge->devfn >> 3;
2215 for (i = dev_no + 1; i < 0x20; i++) {
2216 dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2217 if (!dev)
2218 continue;
2219
2220 /* found next host bridge ?*/
2221 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2222 if (pos != 0) {
2223 pci_dev_put(dev);
2224 break;
2225 }
2226
2227 if (ht_check_msi_mapping(dev)) {
2228 found = 1;
2229 pci_dev_put(dev);
2230 break;
2231 }
2232 pci_dev_put(dev);
2233 }
2234
2235 return found;
2236}
2237
2238#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */
2239#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */
2240
2241static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
2242{
2243 int pos, ctrl_off;
2244 int end = 0;
2245 u16 flags, ctrl;
2246
2247 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2248
2249 if (!pos)
2250 goto out;
2251
2252 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2253
2254 ctrl_off = ((flags >> 10) & 1) ?
2255 PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2256 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2257
2258 if (ctrl & (1 << 6))
2259 end = 1;
2260
2261out:
2262 return end;
2263}
2264
2081static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev) 2265static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2082{ 2266{
2083 struct pci_dev *host_bridge; 2267 struct pci_dev *host_bridge;
@@ -2102,6 +2286,11 @@ static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2102 if (!found) 2286 if (!found)
2103 return; 2287 return;
2104 2288
2289 /* don't enable end_device/host_bridge with leaf directly here */
2290 if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
2291 host_bridge_with_leaf(host_bridge))
2292 goto out;
2293
2105 /* root did that ! */ 2294 /* root did that ! */
2106 if (msi_ht_cap_enabled(host_bridge)) 2295 if (msi_ht_cap_enabled(host_bridge))
2107 goto out; 2296 goto out;
@@ -2132,44 +2321,12 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
2132 } 2321 }
2133} 2322}
2134 2323
2135static int __devinit ht_check_msi_mapping(struct pci_dev *dev) 2324static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2136{
2137 int pos, ttl = 48;
2138 int found = 0;
2139
2140 /* check if there is HT MSI cap or enabled on this device */
2141 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2142 while (pos && ttl--) {
2143 u8 flags;
2144
2145 if (found < 1)
2146 found = 1;
2147 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2148 &flags) == 0) {
2149 if (flags & HT_MSI_FLAGS_ENABLE) {
2150 if (found < 2) {
2151 found = 2;
2152 break;
2153 }
2154 }
2155 }
2156 pos = pci_find_next_ht_capability(dev, pos,
2157 HT_CAPTYPE_MSI_MAPPING);
2158 }
2159
2160 return found;
2161}
2162
2163static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2164{ 2325{
2165 struct pci_dev *host_bridge; 2326 struct pci_dev *host_bridge;
2166 int pos; 2327 int pos;
2167 int found; 2328 int found;
2168 2329
2169 /* Enabling HT MSI mapping on this device breaks MCP51 */
2170 if (dev->device == 0x270)
2171 return;
2172
2173 /* check if there is HT MSI cap or enabled on this device */ 2330 /* check if there is HT MSI cap or enabled on this device */
2174 found = ht_check_msi_mapping(dev); 2331 found = ht_check_msi_mapping(dev);
2175 2332
@@ -2193,7 +2350,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2193 /* Host bridge is to HT */ 2350 /* Host bridge is to HT */
2194 if (found == 1) { 2351 if (found == 1) {
2195 /* it is not enabled, try to enable it */ 2352 /* it is not enabled, try to enable it */
2196 nv_ht_enable_msi_mapping(dev); 2353 if (all)
2354 ht_enable_msi_mapping(dev);
2355 else
2356 nv_ht_enable_msi_mapping(dev);
2197 } 2357 }
2198 return; 2358 return;
2199 } 2359 }
@@ -2205,8 +2365,20 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2205 /* Host bridge is not to HT, disable HT MSI mapping on this device */ 2365 /* Host bridge is not to HT, disable HT MSI mapping on this device */
2206 ht_disable_msi_mapping(dev); 2366 ht_disable_msi_mapping(dev);
2207} 2367}
2208DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2368
2209DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2369static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2370{
2371 return __nv_msi_ht_cap_quirk(dev, 1);
2372}
2373
2374static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2375{
2376 return __nv_msi_ht_cap_quirk(dev, 0);
2377}
2378
2379DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2380
2381DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2210 2382
2211static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 2383static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
2212{ 2384{
@@ -2268,6 +2440,56 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2268 2440
2269#endif /* CONFIG_PCI_MSI */ 2441#endif /* CONFIG_PCI_MSI */
2270 2442
2443#ifdef CONFIG_PCI_IOV
2444
2445/*
2446 * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the
2447 * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the
2448 * old Flash Memory Space.
2449 */
2450static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
2451{
2452 int pos, flags;
2453 u32 bar, start, size;
2454
2455 if (PAGE_SIZE > 0x10000)
2456 return;
2457
2458 flags = pci_resource_flags(dev, 0);
2459 if ((flags & PCI_BASE_ADDRESS_SPACE) !=
2460 PCI_BASE_ADDRESS_SPACE_MEMORY ||
2461 (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) !=
2462 PCI_BASE_ADDRESS_MEM_TYPE_32)
2463 return;
2464
2465 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
2466 if (!pos)
2467 return;
2468
2469 pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar);
2470 if (bar & PCI_BASE_ADDRESS_MEM_MASK)
2471 return;
2472
2473 start = pci_resource_start(dev, 1);
2474 size = pci_resource_len(dev, 1);
2475 if (!start || size != 0x400000 || start & (size - 1))
2476 return;
2477
2478 pci_resource_flags(dev, 1) = 0;
2479 pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
2480 pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start);
2481 pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2);
2482
2483 dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n");
2484}
2485DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
2486DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
2487DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
2488DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
2489DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
2490
2491#endif /* CONFIG_PCI_IOV */
2492
2271static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2493static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
2272 struct pci_fixup *end) 2494 struct pci_fixup *end)
2273{ 2495{
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 042e08924421..176615e7231f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@ static void pci_stop_dev(struct pci_dev *dev)
32 32
33static void pci_destroy_dev(struct pci_dev *dev) 33static void pci_destroy_dev(struct pci_dev *dev)
34{ 34{
35 pci_stop_dev(dev);
36
37 /* Remove the device from the device lists, and prevent any further 35 /* Remove the device from the device lists, and prevent any further
38 * list accesses from this device */ 36 * list accesses from this device */
39 down_write(&pci_bus_sem); 37 down_write(&pci_bus_sem);
@@ -71,6 +69,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
71 down_write(&pci_bus_sem); 69 down_write(&pci_bus_sem);
72 list_del(&pci_bus->node); 70 list_del(&pci_bus->node);
73 up_write(&pci_bus_sem); 71 up_write(&pci_bus_sem);
72 if (!pci_bus->is_added)
73 return;
74
74 pci_remove_legacy_files(pci_bus); 75 pci_remove_legacy_files(pci_bus);
75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); 76 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
76 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); 77 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
@@ -92,6 +93,7 @@ EXPORT_SYMBOL(pci_remove_bus);
92 */ 93 */
93void pci_remove_bus_device(struct pci_dev *dev) 94void pci_remove_bus_device(struct pci_dev *dev)
94{ 95{
96 pci_stop_bus_device(dev);
95 if (dev->subordinate) { 97 if (dev->subordinate) {
96 struct pci_bus *b = dev->subordinate; 98 struct pci_bus *b = dev->subordinate;
97 99
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 5af8bd538149..e8cb5051c311 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
29 if (pdev->is_pcie) 29 if (pdev->is_pcie)
30 return NULL; 30 return NULL;
31 while (1) { 31 while (1) {
32 if (!pdev->bus->self) 32 if (pci_is_root_bus(pdev->bus))
33 break; 33 break;
34 pdev = pdev->bus->self; 34 pdev = pdev->bus->self;
35 /* a p2p bridge */ 35 /* a p2p bridge */
@@ -115,36 +115,6 @@ pci_find_next_bus(const struct pci_bus *from)
115 115
116#ifdef CONFIG_PCI_LEGACY 116#ifdef CONFIG_PCI_LEGACY
117/** 117/**
118 * pci_find_slot - locate PCI device from a given PCI slot
119 * @bus: number of PCI bus on which desired PCI device resides
120 * @devfn: encodes number of PCI slot in which the desired PCI
121 * device resides and the logical device number within that slot
122 * in case of multi-function devices.
123 *
124 * Given a PCI bus and slot/function number, the desired PCI device
125 * is located in system global list of PCI devices. If the device
126 * is found, a pointer to its data structure is returned. If no
127 * device is found, %NULL is returned.
128 *
129 * NOTE: Do not use this function any more; use pci_get_slot() instead, as
130 * the PCI device returned by this function can disappear at any moment in
131 * time.
132 */
133struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
134{
135 struct pci_dev *dev = NULL;
136
137 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
138 if (dev->bus->number == bus && dev->devfn == devfn) {
139 pci_dev_put(dev);
140 return dev;
141 }
142 }
143 return NULL;
144}
145EXPORT_SYMBOL(pci_find_slot);
146
147/**
148 * pci_find_device - begin or continue searching for a PCI device by vendor/device id 118 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
149 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids 119 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
150 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids 120 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 704608945780..b636e245445d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,7 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29 29
30static void pbus_assign_resources_sorted(struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
32 struct pci_dev *dev; 32 struct pci_dev *dev;
33 struct resource *res; 33 struct resource *res;
@@ -58,7 +58,6 @@ static void pbus_assign_resources_sorted(struct pci_bus *bus)
58 res = list->res; 58 res = list->res;
59 idx = res - &list->dev->resource[0]; 59 idx = res - &list->dev->resource[0];
60 if (pci_assign_resource(list->dev, idx)) { 60 if (pci_assign_resource(list->dev, idx)) {
61 /* FIXME: get rid of this */
62 res->start = 0; 61 res->start = 0;
63 res->end = 0; 62 res->end = 0;
64 res->flags = 0; 63 res->flags = 0;
@@ -143,6 +142,10 @@ static void pci_setup_bridge(struct pci_bus *bus)
143 struct pci_dev *bridge = bus->self; 142 struct pci_dev *bridge = bus->self;
144 struct pci_bus_region region; 143 struct pci_bus_region region;
145 u32 l, bu, lu, io_upper16; 144 u32 l, bu, lu, io_upper16;
145 int pref_mem64;
146
147 if (pci_is_enabled(bridge))
148 return;
146 149
147 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", 150 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
148 pci_domain_nr(bus), bus->number); 151 pci_domain_nr(bus), bus->number);
@@ -195,16 +198,22 @@ static void pci_setup_bridge(struct pci_bus *bus)
195 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); 198 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
196 199
197 /* Set up PREF base/limit. */ 200 /* Set up PREF base/limit. */
201 pref_mem64 = 0;
198 bu = lu = 0; 202 bu = lu = 0;
199 pcibios_resource_to_bus(bridge, &region, bus->resource[2]); 203 pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
200 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { 204 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
205 int width = 8;
201 l = (region.start >> 16) & 0xfff0; 206 l = (region.start >> 16) & 0xfff0;
202 l |= region.end & 0xfff00000; 207 l |= region.end & 0xfff00000;
203 bu = upper_32_bits(region.start); 208 if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
204 lu = upper_32_bits(region.end); 209 pref_mem64 = 1;
205 dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n", 210 bu = upper_32_bits(region.start);
206 (unsigned long long)region.start, 211 lu = upper_32_bits(region.end);
207 (unsigned long long)region.end); 212 width = 16;
213 }
214 dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
215 width, (unsigned long long)region.start,
216 width, (unsigned long long)region.end);
208 } 217 }
209 else { 218 else {
210 l = 0x0000fff0; 219 l = 0x0000fff0;
@@ -212,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
212 } 221 }
213 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); 222 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
214 223
215 /* Set the upper 32 bits of PREF base & limit. */ 224 if (pref_mem64) {
216 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); 225 /* Set the upper 32 bits of PREF base & limit. */
217 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); 226 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
227 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
228 }
218 229
219 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); 230 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
220} 231}
@@ -252,8 +263,25 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
252 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); 263 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
253 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); 264 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
254 } 265 }
255 if (pmem) 266 if (pmem) {
256 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; 267 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
268 if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
269 b_res[2].flags |= IORESOURCE_MEM_64;
270 }
271
272 /* double check if bridge does support 64 bit pref */
273 if (b_res[2].flags & IORESOURCE_MEM_64) {
274 u32 mem_base_hi, tmp;
275 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
276 &mem_base_hi);
277 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
278 0xffffffff);
279 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
280 if (!tmp)
281 b_res[2].flags &= ~IORESOURCE_MEM_64;
282 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
283 mem_base_hi);
284 }
257} 285}
258 286
259/* Helper function for sizing routines: find first available 287/* Helper function for sizing routines: find first available
@@ -333,6 +361,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
333 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ 361 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
334 int order, max_order; 362 int order, max_order;
335 struct resource *b_res = find_free_bus_resource(bus, type); 363 struct resource *b_res = find_free_bus_resource(bus, type);
364 unsigned int mem64_mask = 0;
336 365
337 if (!b_res) 366 if (!b_res)
338 return 0; 367 return 0;
@@ -341,9 +370,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
341 max_order = 0; 370 max_order = 0;
342 size = 0; 371 size = 0;
343 372
373 mem64_mask = b_res->flags & IORESOURCE_MEM_64;
374 b_res->flags &= ~IORESOURCE_MEM_64;
375
344 list_for_each_entry(dev, &bus->devices, bus_list) { 376 list_for_each_entry(dev, &bus->devices, bus_list) {
345 int i; 377 int i;
346 378
347 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 379 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
348 struct resource *r = &dev->resource[i]; 380 struct resource *r = &dev->resource[i];
349 resource_size_t r_size; 381 resource_size_t r_size;
@@ -369,6 +401,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
369 aligns[order] += align; 401 aligns[order] += align;
370 if (order > max_order) 402 if (order > max_order)
371 max_order = order; 403 max_order = order;
404 mem64_mask &= r->flags & IORESOURCE_MEM_64;
372 } 405 }
373 } 406 }
374 407
@@ -393,6 +426,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
393 b_res->start = min_align; 426 b_res->start = min_align;
394 b_res->end = size + min_align - 1; 427 b_res->end = size + min_align - 1;
395 b_res->flags |= IORESOURCE_STARTALIGN; 428 b_res->flags |= IORESOURCE_STARTALIGN;
429 b_res->flags |= mem64_mask;
396 return 1; 430 return 1;
397} 431}
398 432
@@ -495,7 +529,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
495} 529}
496EXPORT_SYMBOL(pci_bus_size_bridges); 530EXPORT_SYMBOL(pci_bus_size_bridges);
497 531
498void __ref pci_bus_assign_resources(struct pci_bus *bus) 532void __ref pci_bus_assign_resources(const struct pci_bus *bus)
499{ 533{
500 struct pci_bus *b; 534 struct pci_bus *b;
501 struct pci_dev *dev; 535 struct pci_dev *dev;
@@ -533,11 +567,13 @@ static void pci_bus_dump_res(struct pci_bus *bus)
533 567
534 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 568 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
535 struct resource *res = bus->resource[i]; 569 struct resource *res = bus->resource[i];
536 if (!res) 570 if (!res || !res->end)
537 continue; 571 continue;
538 572
539 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i, 573 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
540 (res->flags & IORESOURCE_IO) ? "io: " : "mem:", res); 574 (res->flags & IORESOURCE_IO) ? "io: " :
575 ((res->flags & IORESOURCE_PREFETCH)? "pref mem":"mem:"),
576 res);
541 } 577 }
542} 578}
543 579
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 32e8d88a4619..b711fb7181e2 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -99,11 +99,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
99int pci_claim_resource(struct pci_dev *dev, int resource) 99int pci_claim_resource(struct pci_dev *dev, int resource)
100{ 100{
101 struct resource *res = &dev->resource[resource]; 101 struct resource *res = &dev->resource[resource];
102 struct resource *root = NULL; 102 struct resource *root;
103 char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; 103 char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
104 int err; 104 int err;
105 105
106 root = pcibios_select_root(dev, res); 106 root = pci_find_parent_resource(dev, res);
107 107
108 err = -EINVAL; 108 err = -EINVAL;
109 if (root != NULL) 109 if (root != NULL)
@@ -120,23 +120,31 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
120 return err; 120 return err;
121} 121}
122 122
123int pci_assign_resource(struct pci_dev *dev, int resno) 123#ifdef CONFIG_PCI_QUIRKS
124void pci_disable_bridge_window(struct pci_dev *dev)
125{
126 dev_dbg(&dev->dev, "Disabling bridge window.\n");
127
128 /* MMIO Base/Limit */
129 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
130
131 /* Prefetchable MMIO Base/Limit */
132 pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
133 pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
134 pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
135}
136#endif /* CONFIG_PCI_QUIRKS */
137
138static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
139 int resno)
124{ 140{
125 struct pci_bus *bus = dev->bus;
126 struct resource *res = dev->resource + resno; 141 struct resource *res = dev->resource + resno;
127 resource_size_t size, min, align; 142 resource_size_t size, min, align;
128 int ret; 143 int ret;
129 144
130 size = resource_size(res); 145 size = resource_size(res);
131 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
132
133 align = resource_alignment(res); 147 align = resource_alignment(res);
134 if (!align) {
135 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
136 "alignment) %pR flags %#lx\n",
137 resno, res, res->flags);
138 return -EINVAL;
139 }
140 148
141 /* First, try exact prefetching match.. */ 149 /* First, try exact prefetching match.. */
142 ret = pci_bus_alloc_resource(bus, res, size, align, min, 150 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -154,10 +162,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
154 pcibios_align_resource, dev); 162 pcibios_align_resource, dev);
155 } 163 }
156 164
157 if (ret) { 165 if (!ret) {
158 dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
159 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
160 } else {
161 res->flags &= ~IORESOURCE_STARTALIGN; 166 res->flags &= ~IORESOURCE_STARTALIGN;
162 if (resno < PCI_BRIDGE_RESOURCES) 167 if (resno < PCI_BRIDGE_RESOURCES)
163 pci_update_resource(dev, resno); 168 pci_update_resource(dev, resno);
@@ -166,6 +171,39 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
166 return ret; 171 return ret;
167} 172}
168 173
174int pci_assign_resource(struct pci_dev *dev, int resno)
175{
176 struct resource *res = dev->resource + resno;
177 resource_size_t align;
178 struct pci_bus *bus;
179 int ret;
180
181 align = resource_alignment(res);
182 if (!align) {
183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
184 "alignment) %pR flags %#lx\n",
185 resno, res, res->flags);
186 return -EINVAL;
187 }
188
189 bus = dev->bus;
190 while ((ret = __pci_assign_resource(bus, dev, resno))) {
191 if (bus->parent && bus->self->transparent)
192 bus = bus->parent;
193 else
194 bus = NULL;
195 if (bus)
196 continue;
197 break;
198 }
199
200 if (ret)
201 dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
202 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
203
204 return ret;
205}
206
169#if 0 207#if 0
170int pci_assign_resource_fixed(struct pci_dev *dev, int resno) 208int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
171{ 209{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 5a8ccb4f604d..eddb0748b0ea 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * drivers/pci/slot.c 2 * drivers/pci/slot.c
3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
5 * Alex Chiang <achiang@hp.com> 5 * Alex Chiang <achiang@hp.com>
6 */ 6 */
7 7
8#include <linux/kobject.h> 8#include <linux/kobject.h>
@@ -52,8 +52,8 @@ static void pci_slot_release(struct kobject *kobj)
52 struct pci_dev *dev; 52 struct pci_dev *dev;
53 struct pci_slot *slot = to_pci_slot(kobj); 53 struct pci_slot *slot = to_pci_slot(kobj);
54 54
55 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, 55 dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
56 slot->bus->number, slot->number); 56 slot->number, pci_slot_name(slot));
57 57
58 list_for_each_entry(dev, &slot->bus->devices, bus_list) 58 list_for_each_entry(dev, &slot->bus->devices, bus_list)
59 if (PCI_SLOT(dev->devfn) == slot->number) 59 if (PCI_SLOT(dev->devfn) == slot->number)
@@ -248,9 +248,8 @@ placeholder:
248 if (PCI_SLOT(dev->devfn) == slot_nr) 248 if (PCI_SLOT(dev->devfn) == slot_nr)
249 dev->slot = slot; 249 dev->slot = slot;
250 250
251 /* Don't care if debug printk has a -1 for slot_nr */ 251 dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 slot_nr, pci_slot_name(slot));
253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
254 253
255out: 254out:
256 kfree(slot_name); 255 kfree(slot_name);
@@ -265,8 +264,8 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
265 264
266/** 265/**
267 * pci_renumber_slot - update %struct pci_slot -> number 266 * pci_renumber_slot - update %struct pci_slot -> number
268 * @slot - %struct pci_slot to update 267 * @slot: &struct pci_slot to update
269 * @slot_nr - new number for slot 268 * @slot_nr: new number for slot
270 * 269 *
271 * The primary purpose of this interface is to allow callers who earlier 270 * The primary purpose of this interface is to allow callers who earlier
272 * created a placeholder slot in pci_create_slot() by passing a -1 as 271 * created a placeholder slot in pci_create_slot() by passing a -1 as
@@ -299,9 +298,8 @@ EXPORT_SYMBOL_GPL(pci_renumber_slot);
299 */ 298 */
300void pci_destroy_slot(struct pci_slot *slot) 299void pci_destroy_slot(struct pci_slot *slot)
301{ 300{
302 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 301 dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
303 atomic_read(&slot->kobj.kref.refcount) - 1, 302 slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
304 pci_domain_nr(slot->bus), slot->bus->number, slot->number);
305 303
306 down_write(&pci_bus_sem); 304 down_write(&pci_bus_sem);
307 kobject_put(&slot->kobj); 305 kobject_put(&slot->kobj);
@@ -309,6 +307,45 @@ void pci_destroy_slot(struct pci_slot *slot)
309} 307}
310EXPORT_SYMBOL_GPL(pci_destroy_slot); 308EXPORT_SYMBOL_GPL(pci_destroy_slot);
311 309
310#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
311#include <linux/pci_hotplug.h>
312/**
313 * pci_hp_create_link - create symbolic link to the hotplug driver module.
314 * @slot: struct pci_slot
315 *
316 * Helper function for pci_hotplug_core.c to create symbolic link to
317 * the hotplug driver module.
318 */
319void pci_hp_create_module_link(struct pci_slot *pci_slot)
320{
321 struct hotplug_slot *slot = pci_slot->hotplug;
322 struct kobject *kobj = NULL;
323 int no_warn;
324
325 if (!slot || !slot->ops)
326 return;
327 kobj = kset_find_obj(module_kset, slot->ops->mod_name);
328 if (!kobj)
329 return;
330 no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
331 kobject_put(kobj);
332}
333EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
334
335/**
336 * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
337 * @slot: struct pci_slot
338 *
339 * Helper function for pci_hotplug_core.c to remove symbolic link to
340 * the hotplug driver module.
341 */
342void pci_hp_remove_module_link(struct pci_slot *pci_slot)
343{
344 sysfs_remove_link(&pci_slot->kobj, "module");
345}
346EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
347#endif
348
312static int pci_slot_init(void) 349static int pci_slot_init(void)
313{ 350{
314 struct kset *pci_bus_kset; 351 struct kset *pci_bus_kset;