Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c  |   1
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c        |   1
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c       |   1
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c   |   2
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c        |   1
-rw-r--r--  drivers/pci/intel-iommu.c                | 130
-rw-r--r--  drivers/pci/msi.c                        |  64
-rw-r--r--  drivers/pci/msi.h                        |  10
-rw-r--r--  drivers/pci/pci.c                        |  15
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c              |   2
-rw-r--r--  drivers/pci/quirks.c                     |   5
-rw-r--r--  drivers/pci/setup-res.c                  |   4
-rw-r--r--  drivers/pci/slot.c                       |   4
-rw-r--r--  drivers/pci/syscall.c                    |   1

14 files changed, 172 insertions, 69 deletions
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index a5b9f6ae507b..d703e73fffa7 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,6 @@
 #include <linux/pci_hotplug.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/smp_lock.h>
 #include <asm/atomic.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 2fa47af992a8..0ff689afa757 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -34,7 +34,6 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/wait.h>
-#include <linux/smp_lock.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
 #include <linux/kthread.h>
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 8450f4a6568a..e6089bdb6e5b 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -33,6 +33,7 @@
 #include <linux/workqueue.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
+#include <linux/smp_lock.h>
 #include <linux/debugfs.h>
 #include "cpqphp.h"
 
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 844580489d4d..5c5043f239cf 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
  * @slot: pointer to the &struct hotplug_slot to register
  * @devnr: device number
  * @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
  *
  * Registers a hotplug slot with the pci hotplug subsystem, which will allow
  * userspace interaction to the slot.
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ff4034502d24..8aab8edf123e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -30,7 +30,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/smp_lock.h>
 #include <linux/pci.h>
 #include <linux/workqueue.h>
 #include "../pci.h"
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..2314ad7ee5fe 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 		}
 
 		set_bit(num, iommu->domain_ids);
-		set_bit(iommu->seq_id, &domain->iommu_bmp);
 		iommu->domains[num] = domain;
 		id = num;
 	}
@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2117,6 +2124,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+	if (iommu_identity_mapping == 2)
+		return IS_GFX_DEVICE(pdev);
+
+	/*
+	 * We want to start off with all devices in the 1:1 domain, and
+	 * take them out later if we find they can't access all of memory.
+	 *
+	 * However, we can't do this for PCI devices behind bridges,
+	 * because all PCI devices behind the same bridge will end up
+	 * with the same source-id on their transactions.
+	 *
+	 * Practically speaking, we can't change things around for these
+	 * devices at run-time, because we can't be sure there'll be no
+	 * DMA transactions in flight for any of their siblings.
+	 *
+	 * So PCI devices (unless they're on the root bus) as well as
+	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+	 * the 1:1 domain, just in _case_ one of their siblings turns out
+	 * not to be able to map all of memory.
+	 */
+	if (!pdev->is_pcie) {
+		if (!pci_is_root_bus(pdev->bus))
+			return 0;
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+			return 0;
+	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+		return 0;
+
+	/*
+	 * At boot time, we don't yet know if devices will be 64-bit capable.
+	 * Assume that they will -- if they turn out not to be, then we can
+	 * take them out of the 1:1 domain later.
+	 */
+	if (!startup)
+		return pdev->dma_mask > DMA_BIT_MASK(32);
+
+	return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
 	struct pci_dev *pdev = NULL;
@@ -2127,16 +2175,18 @@ static int iommu_prepare_static_identity_mapping(void)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
-		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-		       pci_name(pdev));
+		if (iommu_should_identity_map(pdev, 1)) {
+			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+			       pci_name(pdev));
 
-		ret = domain_context_mapping(si_domain, pdev,
-					     CONTEXT_TT_MULTI_LEVEL);
-		if (ret)
-			return ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return ret;
+			ret = domain_context_mapping(si_domain, pdev,
+						     CONTEXT_TT_MULTI_LEVEL);
+			if (ret)
+				return ret;
+			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
@@ -2291,6 +2341,10 @@ int __init init_dmars(void)
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
 	if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+		if (!iommu_identity_mapping)
+			iommu_identity_mapping = 2;
+#endif
 		if (iommu_identity_mapping)
 			iommu_prepare_static_identity_mapping();
 		/*
@@ -2368,15 +2422,7 @@ error:
 	return ret;
 }
 
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
-}
-
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
@@ -2443,16 +2489,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+	struct pci_dev *pdev;
 	int found;
 
+	if (unlikely(dev->bus != &pci_bus_type))
+		return 1;
+
+	pdev = to_pci_dev(dev);
+	if (iommu_dummy(pdev))
+		return 1;
+
 	if (!iommu_identity_mapping)
-		return iommu_dummy(pdev);
+		return 0;
 
 	found = identity_mapping(pdev);
 	if (found) {
-		if (pdev->dma_mask > DMA_BIT_MASK(32))
+		if (iommu_should_identity_map(pdev, 0))
 			return 1;
 		else {
 			/*
@@ -2469,9 +2523,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		 * In case of a detached 64 bit DMA device from vm, the device
 		 * is put into si_domain for identity mapping.
 		 */
-		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+		if (iommu_should_identity_map(pdev, 0)) {
 			int ret;
 			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return 0;
+			ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 			if (!ret) {
 				printk(KERN_INFO "64bit %s uses identity mapping\n",
 				       pci_name(pdev));
@@ -2480,7 +2537,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		}
 	}
 
-	return iommu_dummy(pdev);
+	return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2493,10 +2550,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return paddr;
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2526,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
 
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(dev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -2815,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
@@ -3348,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
@@ -3540,6 +3601,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 
+	if (!size)
+		return;
+
 	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d9f06fbfa0bf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
 	if (!desc->msi_attrib.maskbit)
-		return;
+		return 0;
 
 	mask_bits &= ~mask;
 	mask_bits |= flag;
 	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	desc->masked = __msi_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
  * file. This saves a few milliseconds when initialising devices with lots
  * of MSI-X interrupts.
  */
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-						PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+						PCI_MSIX_ENTRY_VECTOR_CTRL;
 	mask_bits &= ~1;
 	mask_bits |= flag;
 	writel(mask_bits, desc->mask_base + offset);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	desc->masked = __msix_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		void __iomem *base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		writel(msg->address_lo,
-			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		writel(msg->address_hi,
-			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	/* Configure MSI capability structure */
 	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
+		msi_mask_irq(entry, mask, ~mask);
 		msi_free_irqs(dev);
 		return ret;
 	}
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
 
 	for (i = 0; i < nvec; i++) {
 		entry = alloc_msi_entry(dev);
-		if (!entry)
-			break;
+		if (!entry) {
+			if (!i)
+				iounmap(base);
+			else
+				msi_free_irqs(dev);
+			/* No enough memory. Don't try again */
+			return -ENOMEM;
+		}
 
 		j = entries[i].entry;
 		entry->msi_attrib.is_msix = 1;
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
 		set_irq_msi(entry->irq, entry);
 		j = entries[i].entry;
 		entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
-					PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+					PCI_MSIX_ENTRY_VECTOR_CTRL);
 		msix_mask_irq(entry, 1);
 		i++;
 	}
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	pci_intx_for_msi(dev, 1);
 	dev->msi_enabled = 0;
 
+	/* Return the device with MSI unmasked as initial states */
 	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
 	mask = msi_capable_mask(ctrl);
-	msi_mask_irq(desc, mask, ~mask);
+	/* Keep cached state to be restored */
+	__msi_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
 
 	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			msix_mask_irq(entry, 1);
 			if (list_is_last(&entry->list, &dev->msi_list))
 				iounmap(entry->mask_base);
 		}
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
 
 void pci_msix_shutdown(struct pci_dev* dev)
 {
+	struct msi_desc *entry;
+
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
+	/* Return the device with MSI-X masked as initial states */
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		/* Keep cached states to be restored */
+		__msix_mask_irq(entry, 1);
+	}
+
 	msix_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
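[Note on the msi.c refactoring above: each mask helper is split in two — __msi_mask_irq()/__msix_mask_irq() write the hardware and return the new mask bits, while the msi_mask_irq()/msix_mask_irq() wrappers additionally cache the result in desc->masked. The shutdown paths call the bare helpers so the cached value still reflects what the driver programmed and can be re-applied on resume. A rough sketch of the intended calling convention (illustrative only; both function names here are hypothetical):

/* Illustrative sketch, not kernel code. */
static void entry_shutdown(struct msi_desc *desc, u32 mask)
{
	/* Hardware returns to its power-on (unmasked) state... */
	__msi_mask_irq(desc, mask, ~mask);
	/* ...but desc->masked still holds the driver-programmed bits. */
}

static void entry_restore(struct msi_desc *desc, u32 mask)
{
	/* Re-apply the cached bits through the caching wrapper. */
	msi_mask_irq(desc, mask, desc->masked);
}]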
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index a0662842550b..de27c1cb5a2b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,11 +6,11 @@
 #ifndef MSI_H
 #define MSI_H
 
-#define PCI_MSIX_ENTRY_SIZE			16
-#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET	0
-#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET	4
-#define PCI_MSIX_ENTRY_DATA_OFFSET		8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET	12
+#define PCI_MSIX_ENTRY_SIZE		16
+#define PCI_MSIX_ENTRY_LOWER_ADDR	0
+#define PCI_MSIX_ENTRY_UPPER_ADDR	4
+#define PCI_MSIX_ENTRY_DATA		8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL	12
 
 #define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
 #define msi_lower_address_reg(base)	(base + PCI_MSI_ADDRESS_LO)
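[For reference, the renamed msi.h constants are byte offsets into one entry of the MSI-X table, an array of 16-byte entries in device memory, one per vector. A small sketch of how they compose (the helper name is hypothetical; compare the uses in msix_mask_irq() and read_msi_msg_desc() above):

/* Sketch: address of the vector-control word of table entry entry_nr. */
static void __iomem *msix_entry_ctrl(void __iomem *table_base,
				     unsigned int entry_nr)
{
	return table_base + entry_nr * PCI_MSIX_ENTRY_SIZE
			  + PCI_MSIX_ENTRY_VECTOR_CTRL;
}]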
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6c93af5ced18..dbd0f947f497 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1517,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
  *
  * Perform INTx swizzling for a device behind one level of bridge. This is
  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards.
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
  */
 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
 {
-	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+	int slot;
+
+	if (pci_ari_enabled(dev->bus))
+		slot = 0;
+	else
+		slot = PCI_SLOT(dev->devfn);
+
+	return (((pin - 1) + slot) % 4) + 1;
 }
 
 int
@@ -2171,7 +2180,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 	u16 ctrl;
 	struct pci_dev *pdev;
 
-	if (dev->subordinate)
+	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
 		return -ENOTTY;
 
 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
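[The swizzle in pci_swizzle_interrupt_pin() is the standard one-level bridge INTx rotation: with pins numbered 1-4 (INTA-INTD), each slot rotates the pin by its slot number modulo 4. Worked example: a device in slot 2 asserting INTB (pin 2) yields ((2 - 1) + 2) % 4 + 1 = 4, so the bridge sees INTD. Under ARI the traditional device-number bits of devfn are repurposed as part of an 8-bit function number, so PCI_SLOT() can return function bits rather than a slot; the spec defines the device number as 0 in that case, hence the patched code uses slot 0 and the pin passes through unswizzled.]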
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index ece97df4df6d..a928d8ab6bda 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
 		disable_ecrc_checking(dev);
 		break;
 	case ECRC_POLICY_ON:
-		enable_ecrc_checking(dev);;
+		enable_ecrc_checking(dev);
 		break;
 	default:
 		return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56552d74abea..06b965623962 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur when mode detecting */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
 
 /* This was originally an Alpha specific thing, but it really fits here.
  * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index b711fb7181e2..1898c7b47907 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -100,16 +100,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
 {
 	struct resource *res = &dev->resource[resource];
 	struct resource *root;
-	char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
 	int err;
 
 	root = pci_find_parent_resource(dev, res);
 
 	err = -EINVAL;
 	if (root != NULL)
-		err = insert_resource(root, res);
+		err = request_resource(root, res);
 
 	if (err) {
+		const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
 		dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
 			resource,
 			root ? "address space collision on" :
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index eddb0748b0ea..8c02b6c53bdb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
 #include <linux/pci_hotplug.h>
 /**
  * pci_hp_create_link - create symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
  *
  * Helper function for pci_hotplug_core.c to create symbolic link to
  * the hotplug driver module.
@@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
 
 /**
  * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
  *
  * Helper function for pci_hotplug_core.c to remove symbolic link to
 * the hotplug driver module.
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index ec22284eed30..e1c1ec540893 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -9,7 +9,6 @@
 
 #include <linux/errno.h>
 #include <linux/pci.h>
-#include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <asm/uaccess.h>
 #include "pci.h"