Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c |   2
-rw-r--r--  drivers/pci/intel-iommu.c              | 109
-rw-r--r--  drivers/pci/msi.c                      |  64
-rw-r--r--  drivers/pci/msi.h                      |  10
-rw-r--r--  drivers/pci/pci.c                      |  15
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c            |   2
-rw-r--r--  drivers/pci/quirks.c                   |   5
-rw-r--r--  drivers/pci/slot.c                     |   4
8 files changed, 158 insertions(+), 53 deletions(-)
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 844580489d4d..5c5043f239cf 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
  * @slot: pointer to the &struct hotplug_slot to register
  * @devnr: device number
  * @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
  *
  * Registers a hotplug slot with the pci hotplug subsystem, which will allow
  * userspace interaction to the slot.
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..ebc9b8dca881 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2117,6 +2117,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+	if (iommu_identity_mapping == 2)
+		return IS_GFX_DEVICE(pdev);
+
+	/*
+	 * We want to start off with all devices in the 1:1 domain, and
+	 * take them out later if we find they can't access all of memory.
+	 *
+	 * However, we can't do this for PCI devices behind bridges,
+	 * because all PCI devices behind the same bridge will end up
+	 * with the same source-id on their transactions.
+	 *
+	 * Practically speaking, we can't change things around for these
+	 * devices at run-time, because we can't be sure there'll be no
+	 * DMA transactions in flight for any of their siblings.
+	 *
+	 * So PCI devices (unless they're on the root bus) as well as
+	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+	 * the 1:1 domain, just in _case_ one of their siblings turns out
+	 * not to be able to map all of memory.
+	 */
+	if (!pdev->is_pcie) {
+		if (!pci_is_root_bus(pdev->bus))
+			return 0;
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+			return 0;
+	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+		return 0;
+
+	/*
+	 * At boot time, we don't yet know if devices will be 64-bit capable.
+	 * Assume that they will -- if they turn out not to be, then we can
+	 * take them out of the 1:1 domain later.
+	 */
+	if (!startup)
+		return pdev->dma_mask > DMA_BIT_MASK(32);
+
+	return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
 	struct pci_dev *pdev = NULL;
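Note: iommu_should_identity_map() is self-contained enough to trace by hand. The following user-space sketch mirrors its logic -- struct fake_pdev, the stubbed macros, and identity_mapping_mode are stand-ins for the kernel's pci_dev, IS_GFX_DEVICE() and iommu_identity_mapping, so treat it as an illustration rather than kernel code. It shows the two call sites: startup=1 while the static 1:1 domain is built at boot, startup=0 when a device is re-checked at DMA-map time, once its driver-set DMA mask is known.

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-ins for the kernel definitions -- illustrative only */
	#define DMA_BIT_MASK(n)         ((n) == 64 ? ~0ULL : ((1ULL << (n)) - 1))
	#define PCI_CLASS_BRIDGE_PCI    0x0604
	#define PCI_EXP_TYPE_PCI_BRIDGE 0x7

	struct fake_pdev {
		int is_pcie;
		int pcie_type;
		int on_root_bus;      /* stands in for pci_is_root_bus(pdev->bus) */
		unsigned int class;
		int is_gfx;           /* stands in for IS_GFX_DEVICE(pdev) */
		uint64_t dma_mask;
	};

	static int identity_mapping_mode = 1;    /* 2 would mean "graphics only" */

	static int should_identity_map(struct fake_pdev *pdev, int startup)
	{
		if (identity_mapping_mode == 2)
			return pdev->is_gfx;
		/* Conventional PCI behind a bridge shares a source-id: leave it out */
		if (!pdev->is_pcie) {
			if (!pdev->on_root_bus)
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		/* At run-time the driver's DMA mask is known; at boot assume 64-bit */
		if (!startup)
			return pdev->dma_mask > DMA_BIT_MASK(32);
		return 1;
	}

	int main(void)
	{
		struct fake_pdev nic = { .is_pcie = 1, .pcie_type = 0,
					 .on_root_bus = 1, .class = 0x020000,
					 .dma_mask = DMA_BIT_MASK(32) };

		printf("at boot: %d\n", should_identity_map(&nic, 1));  /* 1 */
		printf("at map:  %d\n", should_identity_map(&nic, 0));  /* 0: 32-bit mask */
		return 0;
	}

A 32-bit-only device is thus accepted at boot but rejected on the run-time check, which is exactly the case the reworked iommu_no_mapping() below migrates out of the 1:1 domain.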
@@ -2127,16 +2168,18 @@ static int iommu_prepare_static_identity_mapping(void)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
-		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-		       pci_name(pdev));
+		if (iommu_should_identity_map(pdev, 1)) {
+			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+			       pci_name(pdev));
 
-		ret = domain_context_mapping(si_domain, pdev,
-					     CONTEXT_TT_MULTI_LEVEL);
-		if (ret)
-			return ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return ret;
+			ret = domain_context_mapping(si_domain, pdev,
+						     CONTEXT_TT_MULTI_LEVEL);
+			if (ret)
+				return ret;
+			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
@@ -2291,6 +2334,10 @@ int __init init_dmars(void)
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
 	if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+		if (!iommu_identity_mapping)
+			iommu_identity_mapping = 2;
+#endif
 		if (iommu_identity_mapping)
 			iommu_prepare_static_identity_mapping();
 		/*
@@ -2368,15 +2415,15 @@ error:
 	return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
 					    size_t size)
 {
 	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
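Note: the two new comments flag a real unit mismatch -- MM pages (PAGE_SIZE) and VT-d pages (always 4KiB, VTD_PAGE_SHIFT = 12) differ on some configurations. A minimal arithmetic sketch, assuming 16KiB MM pages; the macros below are local stand-ins for the kernel's PAGE_* helpers, not kernel code:

	#include <stdio.h>

	#define MM_PAGE_SHIFT    14                    /* assume 16KiB MM pages */
	#define MM_PAGE_SIZE     (1UL << MM_PAGE_SHIFT)
	#define MM_PAGE_MASK     (~(MM_PAGE_SIZE - 1))
	#define MM_PAGE_ALIGN(x) (((x) + MM_PAGE_SIZE - 1) & MM_PAGE_MASK)
	#define VTD_PAGE_SHIFT   12                    /* VT-d always uses 4KiB */

	/* Mirrors the rewritten aligned_nrpages(): VT-d pages, MM-page aligned */
	static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
	{
		host_addr &= ~MM_PAGE_MASK;            /* keep offset within MM page */
		return MM_PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
	}

	int main(void)
	{
		/* 1 byte at offset 0: one MM page = four VT-d pages */
		printf("%lu\n", aligned_nrpages(0, 1));           /* 4 */
		/* 8KiB starting 12KiB into an MM page: spans two MM pages */
		printf("%lu\n", aligned_nrpages(12288, 8192));    /* 8 */
		return 0;
	}

For the second call, intel_alloc_iova() would receive dma_to_mm_pfn(8) -- presumably 8 >> (PAGE_SHIFT - VTD_PAGE_SHIFT) = 2, i.e. two MM pages -- which is why the call sites below now wrap the result of aligned_nrpages() in dma_to_mm_pfn().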
@@ -2443,16 +2490,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+	struct pci_dev *pdev;
 	int found;
 
+	if (unlikely(dev->bus != &pci_bus_type))
+		return 1;
+
+	pdev = to_pci_dev(dev);
+	if (iommu_dummy(pdev))
+		return 1;
+
 	if (!iommu_identity_mapping)
-		return iommu_dummy(pdev);
+		return 0;
 
 	found = identity_mapping(pdev);
 	if (found) {
-		if (pdev->dma_mask > DMA_BIT_MASK(32))
+		if (iommu_should_identity_map(pdev, 0))
 			return 1;
 		else {
 			/*
@@ -2469,9 +2524,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 	 * In case of a detached 64 bit DMA device from vm, the device
 	 * is put into si_domain for identity mapping.
 	 */
-	if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+	if (iommu_should_identity_map(pdev, 0)) {
 		int ret;
 		ret = domain_add_dev_info(si_domain, pdev);
+		if (ret)
+			return 0;
+		ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 		if (!ret) {
 			printk(KERN_INFO "64bit %s uses identity mapping\n",
 			       pci_name(pdev));
@@ -2480,7 +2538,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		}
 	}
 
-	return iommu_dummy(pdev);
+	return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2496,7 +2554,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return paddr;
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(dev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -3540,6 +3600,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 
+	if (!size)
+		return;
+
 	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
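Note: the new !size guard matters because the end of the range is computed as iova + size - 1. For size == 0 that expression is nonsense: it wraps to ULONG_MAX when iova is 0, and otherwise lands one page below the start. A two-line demonstration of the arithmetic, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		unsigned long iova = 0, size = 0;
		/* end pfn wraps to ULONG_MAX >> 12 -- a clear over "everything" */
		printf("end = %#lx\n", (iova + size - 1) >> 12);

		iova = 0x1000;
		/* end pfn (0) now sits below the start pfn (1) */
		printf("start = %#lx, end = %#lx\n", iova >> 12,
		       (iova + size - 1) >> 12);
		return 0;
	}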
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d9f06fbfa0bf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
 	if (!desc->msi_attrib.maskbit)
-		return;
+		return 0;
 
 	mask_bits &= ~mask;
 	mask_bits |= flag;
 	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	desc->masked = __msi_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
  * file. This saves a few milliseconds when initialising devices with lots
  * of MSI-X interrupts.
  */
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-						PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+						PCI_MSIX_ENTRY_VECTOR_CTRL;
 	mask_bits &= ~1;
 	mask_bits |= flag;
 	writel(mask_bits, desc->mask_base + offset);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	desc->masked = __msix_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(unsigned irq, u32 flag)
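Note: the split above is a write-through-cache pattern: the double-underscore helpers program the hardware and return the value written, while the thin wrappers also record it in desc->masked. The shutdown paths later in this patch call the double-underscore variants directly, so the hardware is returned to its initial state without clobbering the cached value that a subsequent restore depends on. A simplified, self-contained sketch of the pattern; hw_write() stands in for the real pci_write_config_dword()/writel():

	#include <stdint.h>
	#include <stdio.h>

	struct desc {
		uint32_t masked;     /* cached copy, consulted when state is restored */
	};

	/* Stub for the real register write */
	static void hw_write(uint32_t val)
	{
		printf("hw <- %#x\n", val);
	}

	/* Writes the hardware, returns what was written -- does NOT touch cache */
	static uint32_t __mask_irq(struct desc *d, uint32_t mask, uint32_t flag)
	{
		uint32_t bits = d->masked;

		bits &= ~mask;
		bits |= flag;
		hw_write(bits);
		return bits;
	}

	/* Normal path: write the hardware AND update the cached copy */
	static void mask_irq(struct desc *d, uint32_t mask, uint32_t flag)
	{
		d->masked = __mask_irq(d, mask, flag);
	}

	int main(void)
	{
		struct desc d = { .masked = 0x1 };    /* vector currently masked */

		/* Shutdown: unmask in hardware, keep d.masked == 0x1 for restore */
		__mask_irq(&d, 0x1, 0x0);
		printf("cached: %#x\n", d.masked);    /* still 0x1 */

		/* Normal path would have updated the cache too */
		mask_irq(&d, 0x1, 0x0);
		printf("cached: %#x\n", d.masked);    /* now 0 */
		return 0;
	}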
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		void __iomem *base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		writel(msg->address_lo,
-			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		writel(msg->address_hi,
-			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	/* Configure MSI capability structure */
 	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
+		msi_mask_irq(entry, mask, ~mask);
 		msi_free_irqs(dev);
 		return ret;
 	}
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
 
 	for (i = 0; i < nvec; i++) {
 		entry = alloc_msi_entry(dev);
-		if (!entry)
-			break;
+		if (!entry) {
+			if (!i)
+				iounmap(base);
+			else
+				msi_free_irqs(dev);
+			/* No enough memory. Don't try again */
+			return -ENOMEM;
+		}
 
 		j = entries[i].entry;
 		entry->msi_attrib.is_msix = 1;
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
 		set_irq_msi(entry->irq, entry);
 		j = entries[i].entry;
 		entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
-					PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+					PCI_MSIX_ENTRY_VECTOR_CTRL);
 		msix_mask_irq(entry, 1);
 		i++;
 	}
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	pci_intx_for_msi(dev, 1);
 	dev->msi_enabled = 0;
 
+	/* Return the device with MSI unmasked as initial states */
 	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
 	mask = msi_capable_mask(ctrl);
-	msi_mask_irq(desc, mask, ~mask);
+	/* Keep cached state to be restored */
+	__msi_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
 
 	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			msix_mask_irq(entry, 1);
 			if (list_is_last(&entry->list, &dev->msi_list))
 				iounmap(entry->mask_base);
 		}
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
 
 void pci_msix_shutdown(struct pci_dev* dev)
 {
+	struct msi_desc *entry;
+
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
+	/* Return the device with MSI-X masked as initial states */
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		/* Keep cached states to be restored */
+		__msix_mask_irq(entry, 1);
+	}
+
 	msix_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index a0662842550b..de27c1cb5a2b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,11 +6,11 @@
 #ifndef MSI_H
 #define MSI_H
 
 #define PCI_MSIX_ENTRY_SIZE		16
-#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET	0
-#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET	4
-#define PCI_MSIX_ENTRY_DATA_OFFSET		8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET	12
+#define PCI_MSIX_ENTRY_LOWER_ADDR	0
+#define PCI_MSIX_ENTRY_UPPER_ADDR	4
+#define PCI_MSIX_ENTRY_DATA		8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL	12
 
 #define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
 #define msi_lower_address_reg(base)	(base + PCI_MSI_ADDRESS_LO)
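Note: these constants describe one MSI-X table entry, which the PCI specification fixes at 16 bytes: a 64-bit message address split across two dwords, a message-data dword, and a vector-control dword whose bit 0 is the per-vector mask. The same layout expressed as a C struct, purely as a sketch -- the kernel deliberately uses readl()/writel() at these offsets instead:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	/* One MSI-X table entry: 16 bytes of little-endian MMIO */
	struct msix_entry_layout {
		uint32_t lower_addr;    /* offset  0: PCI_MSIX_ENTRY_LOWER_ADDR  */
		uint32_t upper_addr;    /* offset  4: PCI_MSIX_ENTRY_UPPER_ADDR  */
		uint32_t data;          /* offset  8: PCI_MSIX_ENTRY_DATA        */
		uint32_t vector_ctrl;   /* offset 12: PCI_MSIX_ENTRY_VECTOR_CTRL,
					   bit 0 is the per-vector mask          */
	};

	int main(void)
	{
		/* Matches PCI_MSIX_ENTRY_SIZE and the offsets defined in msi.h */
		printf("size=%zu ctrl=%zu\n",
		       sizeof(struct msix_entry_layout),
		       offsetof(struct msix_entry_layout, vector_ctrl)); /* 16, 12 */
		return 0;
	}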
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6c93af5ced18..dbd0f947f497 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1517,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
  *
  * Perform INTx swizzling for a device behind one level of bridge. This is
  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards.
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
  */
 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
 {
-	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+	int slot;
+
+	if (pci_ari_enabled(dev->bus))
+		slot = 0;
+	else
+		slot = PCI_SLOT(dev->devfn);
+
+	return (((pin - 1) + slot) % 4) + 1;
 }
 
 int
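Note: the swizzle arithmetic is easy to verify by hand. In the sketch below, swizzle_pin() is a hypothetical stand-in that takes the slot number directly instead of deriving it from dev->devfn: a device in slot 2 raising INTB (pin 2) emerges from its bridge as INTD (pin 4), while an ARI device (slot forced to 0) keeps its pin unchanged.

	#include <stdio.h>
	#include <stdint.h>

	/* Same formula as pci_swizzle_interrupt_pin(), with slot passed in */
	static uint8_t swizzle_pin(int slot, uint8_t pin)
	{
		return (((pin - 1) + slot) % 4) + 1;
	}

	int main(void)
	{
		printf("slot 2, INTB -> INT%c\n",
		       'A' + swizzle_pin(2, 2) - 1);   /* INTD */
		printf("ARI,    INTB -> INT%c\n",
		       'A' + swizzle_pin(0, 2) - 1);   /* INTB */
		return 0;
	}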
@@ -2171,7 +2180,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 	u16 ctrl;
 	struct pci_dev *pdev;
 
-	if (dev->subordinate)
+	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
 		return -ENOTTY;
 
 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index ece97df4df6d..a928d8ab6bda 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
 		disable_ecrc_checking(dev);
 		break;
 	case ECRC_POLICY_ON:
-		enable_ecrc_checking(dev);;
+		enable_ecrc_checking(dev);
 		break;
 	default:
 		return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56552d74abea..06b965623962 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur when mode detecting */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
 
 /* This was originally an Alpha specific thing, but it really fits here.
  * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index eddb0748b0ea..8c02b6c53bdb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
 #include <linux/pci_hotplug.h>
 /**
  * pci_hp_create_link - create symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
  *
  * Helper function for pci_hotplug_core.c to create symbolic link to
  * the hotplug driver module.
@@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
 
 /**
  * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
  *
  * Helper function for pci_hotplug_core.c to remove symbolic link to
 * the hotplug driver module.
