34 files changed, 372 insertions, 278 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 8285ed4676b6..624e6516fdd3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6449,19 +6449,52 @@ F: drivers/pci/
 F: include/linux/pci*
 F: arch/x86/pci/
 
+PCI DRIVER FOR IMX6
+M: Richard Zhu <r65037@freescale.com>
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*imx6*
+
+PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+M: Jason Cooper <jason@lakedaemon.net>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*mvebu*
+
 PCI DRIVER FOR NVIDIA TEGRA
 M: Thierry Reding <thierry.reding@gmail.com>
 L: linux-tegra@vger.kernel.org
+L: linux-pci@vger.kernel.org
 S: Supported
 F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 F: drivers/pci/host/pci-tegra.c
 
+PCI DRIVER FOR RENESAS R-CAR
+M: Simon Horman <horms@verge.net.au>
+L: linux-pci@vger.kernel.org
+L: linux-sh@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/*rcar*
+
 PCI DRIVER FOR SAMSUNG EXYNOS
 M: Jingoo Han <jg1.han@samsung.com>
 L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S: Maintained
 F: drivers/pci/host/pci-exynos.c
 
+PCI DRIVER FOR SYNOPSIS DESIGNWARE
+M: Mohit Kumar <mohit.kumar@st.com>
+M: Jingoo Han <jg1.han@samsung.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/*designware*
+
 PCMCIA SUBSYSTEM
 P: Linux PCMCIA Team
 L: linux-pcmcia@lists.infradead.org
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index a21d0ab3b19e..eddee7720343 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -325,7 +325,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 /* Helper for generic DMA-mapping functions. */
 static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
 {
-	if (dev && dev->bus == &pci_bus_type)
+	if (dev && dev_is_pci(dev))
 		return to_pci_dev(dev);
 
 	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
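Note on the dev_is_pci() conversions in this and the following files:
dev_is_pci() is a small helper from include/linux/pci.h. At the time of this
series it is essentially the one-line macro sketched below (shown here for
reference only; it is not part of the patch):

	/* include/linux/pci.h (sketch) */
	#define dev_is_pci(d) ((d)->bus == &pci_bus_type)

so replacing the open-coded "dev->bus == &pci_bus_type" comparison with
dev_is_pci(dev) does not change behavior.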
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 001f4913799c..5114b68e99d5 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -257,7 +257,7 @@ static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s
  */
 static int it8152_pci_platform_notify(struct device *dev)
 {
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
@@ -268,7 +268,7 @@ static int it8152_pci_platform_notify(struct device *dev)
 
 static int it8152_pci_platform_notify_remove(struct device *dev)
 {
-	if (dev->bus == &pci_bus_type)
+	if (dev_is_pci(dev))
 		dmabounce_unregister_dev(dev);
 
 	return 0;
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 6d6bde3e15fa..200970d56f6d 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -326,7 +326,7 @@ static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s
  */
 static int ixp4xx_pci_platform_notify(struct device *dev)
 {
-	if(dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		*dev->dma_mask = SZ_64M - 1;
 		dev->coherent_dma_mask = SZ_64M - 1;
 		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
@@ -336,9 +336,9 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
 
 static int ixp4xx_pci_platform_notify_remove(struct device *dev)
 {
-	if(dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev))
 		dmabounce_unregister_dev(dev);
-	}
+
 	return 0;
 }
 
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 4c530a82fc46..8e858b593e4f 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -255,7 +255,7 @@ static u64 prefetch_spill_page;
 #endif
 
 #ifdef CONFIG_PCI
-# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
+# define GET_IOC(dev) ((dev_is_pci(dev)) \
 	? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
 #else
 # define GET_IOC(dev) NULL
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 3290d6e00c31..d0853e8e8623 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -34,7 +34,7 @@
  */
 static int sn_dma_supported(struct device *dev, u64 mask)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	if (mask < 0x7fffffff)
 		return 0;
@@ -50,7 +50,7 @@ static int sn_dma_supported(struct device *dev, u64 mask)
  */
 int sn_dma_set_mask(struct device *dev, u64 dma_mask)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	if (!sn_dma_supported(dev, dma_mask))
 		return 0;
@@ -85,7 +85,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	/*
 	 * Allocate the memory.
@@ -143,7 +143,7 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	provider->dma_unmap(pdev, dma_handle, 0);
 	free_pages((unsigned long)cpu_addr, get_order(size));
@@ -187,7 +187,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
 
 	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	phys_addr = __pa(cpu_addr);
 	if (dmabarr)
@@ -223,7 +223,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	provider->dma_unmap(pdev, dma_addr, dir);
 }
@@ -247,7 +247,7 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	struct scatterlist *sg;
 
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	for_each_sg(sgl, sg, nhwentries, i) {
 		provider->dma_unmap(pdev, sg->dma_address, dir);
@@ -284,7 +284,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
 
 	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 
 	/*
 	 * Setup a DMA address for each entry in the scatterlist.
@@ -323,26 +323,26 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
 static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 }
 
 static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 	size_t size,
 	enum dma_data_direction dir)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 }
 
 static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int nelems, enum dma_data_direction dir)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 }
 
 static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int nelems, enum dma_data_direction dir)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(!dev_is_pci(dev));
 }
 
 static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 14285caec71a..dba508fe1683 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -282,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type)
 	return NULL;
 }
 
-#ifdef CONFIG_PCI
-static inline int is_pci_dev(struct device *dev)
-{
-	return dev->bus == &pci_bus_type;
-}
-#else
-static inline int is_pci_dev(struct device *dev)
-{
-	return 0;
-}
-#endif
-
 /*
  * get_node_path fills in @path with the firmware path to the device.
  * Note that if @node is a parisc device, we don't fill in the 'mod' field.
@@ -306,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
 	int i = 5;
 	memset(&path->bc, -1, 6);
 
-	if (is_pci_dev(dev)) {
+	if (dev_is_pci(dev)) {
 		unsigned int devfn = to_pci_dev(dev)->devfn;
 		path->mod = PCI_FUNC(devfn);
 		path->bc[i--] = PCI_SLOT(devfn);
@@ -314,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
 	}
 
 	while (dev != &root) {
-		if (is_pci_dev(dev)) {
+		if (dev_is_pci(dev)) {
 			unsigned int devfn = to_pci_dev(dev)->devfn;
 			path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
 		} else if (dev->bus == &parisc_bus_type) {
@@ -695,7 +683,7 @@ static int check_parent(struct device * dev, void * data)
 	if (dev->bus == &parisc_bus_type) {
 		if (match_parisc_device(dev, d->index, d->modpath))
 			d->dev = dev;
-	} else if (is_pci_dev(dev)) {
+	} else if (dev_is_pci(dev)) {
 		if (match_pci_device(dev, d->index, d->modpath))
 			d->dev = dev;
 	} else if (dev->bus == NULL) {
@@ -753,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath)
 		if (!parent)
 			return NULL;
 	}
-	if (is_pci_dev(parent)) /* pci devices already parse MOD */
+	if (dev_is_pci(parent)) /* pci devices already parse MOD */
 		return parent;
 	else
 		return parse_tree_node(parent, 6, modpath);
@@ -772,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path)
 		padev = to_parisc_device(dev);
 		get_node_path(dev->parent, path);
 		path->mod = padev->hw_path;
-	} else if (is_pci_dev(dev)) {
+	} else if (dev_is_pci(dev)) {
 		get_node_path(dev, path);
 	}
 }
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed141aac7..76663b019eb5 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
 		return 1;
 
 #ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
+	if (dev_is_pci(dev))
 		return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
 
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468de9b2..e7e215dfa866 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
+	if (dev_is_pci(dev))
 		return 1;
-#endif
+
 	return 0;
 }
 EXPORT_SYMBOL(dma_supported);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 947b5c417e83..0de52c5bf9a2 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -104,7 +104,7 @@ extern void pci_iommu_alloc(void);
 struct msi_desc;
 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void native_teardown_msi_irq(unsigned int irq);
-void native_restore_msi_irqs(struct pci_dev *dev, int irq);
+void native_restore_msi_irqs(struct pci_dev *dev);
 int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	unsigned int irq_base, unsigned int irq_offset);
 #else
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f1be11e43d2..e45e4da96bf1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,7 +181,7 @@ struct x86_msi_ops {
 			       u8 hpet_id);
 	void (*teardown_msi_irq)(unsigned int irq);
 	void (*teardown_msi_irqs)(struct pci_dev *dev);
-	void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+	void (*restore_msi_irqs)(struct pci_dev *dev);
 	int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
 	u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
 	u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6c0b43bd024b..d359d0fffa50 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1034,9 +1034,7 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
 
 	if (!acpi_ioapic)
 		return 0;
-	if (!dev)
-		return 0;
-	if (dev->bus != &pci_bus_type)
+	if (!dev || !dev_is_pci(dev))
 		return 0;
 
 	pdev = to_pci_dev(dev);
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 021783b1f46a..e48b674639cc 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -136,9 +136,9 @@ void arch_teardown_msi_irq(unsigned int irq)
 	x86_msi.teardown_msi_irq(irq);
 }
 
-void arch_restore_msi_irqs(struct pci_dev *dev, int irq)
+void arch_restore_msi_irqs(struct pci_dev *dev)
 {
-	x86_msi.restore_msi_irqs(dev, irq);
+	x86_msi.restore_msi_irqs(dev);
 }
 u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 5eee4959785d..103e702ec5a7 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -337,7 +337,7 @@ out:
 	return ret;
 }
 
-static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq)
+static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
 {
 	int ret = 0;
 
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 272a3ec35957..8842cde69177 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -232,8 +232,10 @@ static int __init eisa_init_device(struct eisa_root_device *root,
 static int __init eisa_register_device(struct eisa_device *edev)
 {
 	int rc = device_register(&edev->dev);
-	if (rc)
+	if (rc) {
+		put_device(&edev->dev);
 		return rc;
+	}
 
 	rc = device_create_file(&edev->dev, &dev_attr_signature);
 	if (rc)
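The eisa-bus change above follows the driver-core rule that once
device_register() has been called on a struct device, an error must be
handled by dropping the reference with put_device() rather than freeing the
object directly, so that the release() callback runs. A minimal sketch of
the pattern (hypothetical example_register(), not taken from the patch):

	static int example_register(struct device *dev)
	{
		int rc = device_register(dev);

		if (rc) {
			/* drop the reference; dev->release() frees the object */
			put_device(dev);
			return rc;
		}
		return 0;
	}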
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index fc1b74013743..a037d81f21ed 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -176,6 +176,7 @@ int pci_bus_add_device(struct pci_dev *dev)
 	 */
 	pci_fixup_device(pci_fixup_final, dev);
 	pci_create_sysfs_dev_files(dev);
+	pci_proc_attach_device(dev);
 
 	dev->match_driver = true;
 	retval = device_attach(&dev->dev);
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index bd70af8f31ac..9fc1cb66c64e 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -426,16 +426,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 			 "imprecise external abort");
 
 	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!dbi_base) {
-		dev_err(&pdev->dev, "dbi_base memory resource not found\n");
-		return -ENODEV;
-	}
-
 	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
-	if (IS_ERR(pp->dbi_base)) {
-		ret = PTR_ERR(pp->dbi_base);
-		goto err;
-	}
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
 
 	/* Fetch GPIOs */
 	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
@@ -449,7 +442,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 				    "PCIe reset");
 	if (ret) {
 		dev_err(&pdev->dev, "unable to get reset gpio\n");
-		goto err;
+		return ret;
 	}
 
 	imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
@@ -460,7 +453,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 					    "PCIe power enable");
 		if (ret) {
 			dev_err(&pdev->dev, "unable to get power-on gpio\n");
-			goto err;
+			return ret;
 		}
 	}
 
@@ -472,7 +465,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 					    "PCIe wake up");
 		if (ret) {
 			dev_err(&pdev->dev, "unable to get wake-up gpio\n");
-			goto err;
+			return ret;
 		}
 	}
 
@@ -484,7 +477,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 					    "PCIe disable endpoint");
 		if (ret) {
 			dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
-			goto err;
+			return ret;
 		}
 	}
 
@@ -493,32 +486,28 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 	if (IS_ERR(imx6_pcie->lvds_gate)) {
 		dev_err(&pdev->dev,
 			"lvds_gate clock select missing or invalid\n");
-		ret = PTR_ERR(imx6_pcie->lvds_gate);
-		goto err;
+		return PTR_ERR(imx6_pcie->lvds_gate);
 	}
 
 	imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
 	if (IS_ERR(imx6_pcie->sata_ref_100m)) {
 		dev_err(&pdev->dev,
 			"sata_ref_100m clock source missing or invalid\n");
-		ret = PTR_ERR(imx6_pcie->sata_ref_100m);
-		goto err;
+		return PTR_ERR(imx6_pcie->sata_ref_100m);
 	}
 
 	imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
 	if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
 		dev_err(&pdev->dev,
 			"pcie_ref_125m clock source missing or invalid\n");
-		ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
-		goto err;
+		return PTR_ERR(imx6_pcie->pcie_ref_125m);
 	}
 
 	imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
 	if (IS_ERR(imx6_pcie->pcie_axi)) {
 		dev_err(&pdev->dev,
 			"pcie_axi clock source missing or invalid\n");
-		ret = PTR_ERR(imx6_pcie->pcie_axi);
-		goto err;
+		return PTR_ERR(imx6_pcie->pcie_axi);
 	}
 
 	/* Grab GPR config register range */
@@ -526,19 +515,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 		syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
 	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
 		dev_err(&pdev->dev, "unable to find iomuxc registers\n");
-		ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
-		goto err;
+		return PTR_ERR(imx6_pcie->iomuxc_gpr);
 	}
 
 	ret = imx6_add_pcie_port(pp, pdev);
 	if (ret < 0)
-		goto err;
+		return ret;
 
 	platform_set_drvdata(pdev, imx6_pcie);
 	return 0;
-
-err:
-	return ret;
 }
 
 static const struct of_device_id imx6_pcie_of_match[] = {
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index c269e430c760..533e0df02718 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -150,6 +150,11 @@ static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
 	return readl(port->base + reg);
 }
 
+static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
+{
+	return port->io_target != -1 && port->io_attr != -1;
+}
+
 static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
 {
 	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
@@ -300,7 +305,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 
 	/* Are the new iobase/iolimit values invalid? */
 	if (port->bridge.iolimit < port->bridge.iobase ||
-	    port->bridge.iolimitupper < port->bridge.iobaseupper) {
+	    port->bridge.iolimitupper < port->bridge.iobaseupper ||
+	    !(port->bridge.command & PCI_COMMAND_IO)) {
 
 		/* If a window was configured, remove it */
 		if (port->iowin_base) {
@@ -313,6 +319,12 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 		return;
 	}
 
+	if (!mvebu_has_ioport(port)) {
+		dev_WARN(&port->pcie->pdev->dev,
+			 "Attempt to set IO when IO is disabled\n");
+		return;
+	}
+
 	/*
 	 * We read the PCI-to-PCI bridge emulated registers, and
 	 * calculate the base address and size of the address decoding
@@ -337,7 +349,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
 {
 	/* Are the new membase/memlimit values invalid? */
-	if (port->bridge.memlimit < port->bridge.membase) {
+	if (port->bridge.memlimit < port->bridge.membase ||
+	    !(port->bridge.command & PCI_COMMAND_MEMORY)) {
 
 		/* If a window was configured, remove it */
 		if (port->memwin_base) {
@@ -426,9 +439,12 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
 		break;
 
 	case PCI_IO_BASE:
-		*value = (bridge->secondary_status << 16 |
-			  bridge->iolimit << 8 |
-			  bridge->iobase);
+		if (!mvebu_has_ioport(port))
+			*value = bridge->secondary_status << 16;
+		else
+			*value = (bridge->secondary_status << 16 |
+				  bridge->iolimit << 8 |
+				  bridge->iobase);
 		break;
 
 	case PCI_MEMORY_BASE:
@@ -447,6 +463,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
 		*value = 0;
 		break;
 
+	case PCI_INTERRUPT_LINE:
+		/* LINE PIN MIN_GNT MAX_LAT */
+		*value = 0;
+		break;
+
 	default:
 		*value = 0xffffffff;
 		return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -485,8 +506,19 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
 
 	switch (where & ~3) {
 	case PCI_COMMAND:
+	{
+		u32 old = bridge->command;
+
+		if (!mvebu_has_ioport(port))
+			value &= ~PCI_COMMAND_IO;
+
 		bridge->command = value & 0xffff;
+		if ((old ^ bridge->command) & PCI_COMMAND_IO)
+			mvebu_pcie_handle_iobase_change(port);
+		if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+			mvebu_pcie_handle_membase_change(port);
 		break;
+	}
 
 	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
 		bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
@@ -500,7 +532,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
 		 */
 		bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
 		bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
-		bridge->secondary_status = value >> 16;
 		mvebu_pcie_handle_iobase_change(port);
 		break;
 
@@ -651,7 +682,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
 	struct mvebu_pcie *pcie = sys_to_pcie(sys);
 	int i;
 
-	pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset);
+	if (resource_size(&pcie->realio) != 0)
+		pci_add_resource_offset(&sys->resources, &pcie->realio,
+					sys->io_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource(&sys->resources, &pcie->busn);
 
@@ -752,12 +785,17 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
 #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
 
 static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
-			      unsigned long type, int *tgt, int *attr)
+			      unsigned long type,
+			      unsigned int *tgt,
+			      unsigned int *attr)
 {
 	const int na = 3, ns = 2;
 	const __be32 *range;
 	int rlen, nranges, rangesz, pna, i;
 
+	*tgt = -1;
+	*attr = -1;
+
 	range = of_get_property(np, "ranges", &rlen);
 	if (!range)
 		return -EINVAL;
@@ -827,16 +865,15 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
 	}
 
 	mvebu_mbus_get_pcie_io_aperture(&pcie->io);
-	if (resource_size(&pcie->io) == 0) {
-		dev_err(&pdev->dev, "invalid I/O aperture size\n");
-		return -EINVAL;
-	}
 
-	pcie->realio.flags = pcie->io.flags;
-	pcie->realio.start = PCIBIOS_MIN_IO;
-	pcie->realio.end = min_t(resource_size_t,
-				 IO_SPACE_LIMIT,
-				 resource_size(&pcie->io));
+	if (resource_size(&pcie->io) != 0) {
+		pcie->realio.flags = pcie->io.flags;
+		pcie->realio.start = PCIBIOS_MIN_IO;
+		pcie->realio.end = min_t(resource_size_t,
+					 IO_SPACE_LIMIT,
+					 resource_size(&pcie->io));
+	} else
+		pcie->realio = pcie->io;
 
 	/* Get the bus range */
 	ret = of_pci_parse_bus_range(np, &pcie->busn);
@@ -895,12 +932,12 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
 			continue;
 		}
 
-		ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
-					 &port->io_target, &port->io_attr);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for io window\n",
-				port->port, port->lane);
-			continue;
+		if (resource_size(&pcie->io) != 0)
+			mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
+					   &port->io_target, &port->io_attr);
+		else {
+			port->io_target = -1;
+			port->io_attr = -1;
 		}
 
 		port->reset_gpio = of_get_named_gpio_flags(child,
@@ -949,14 +986,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
 
 		mvebu_pcie_set_local_dev_nr(port, 1);
 
-		port->clk = of_clk_get_by_name(child, NULL);
-		if (IS_ERR(port->clk)) {
-			dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
-				port->port, port->lane);
-			iounmap(port->base);
-			continue;
-		}
-
 		port->dn = child;
 		spin_lock_init(&port->conf_lock);
 		mvebu_sw_pci_bridge_init(port);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index e33b68be0391..1c92833a4ed3 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -209,6 +209,23 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
 	return 0;
 }
 
+static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
+			    unsigned int nvec, unsigned int pos)
+{
+	unsigned int i, res, bit, val;
+
+	for (i = 0; i < nvec; i++) {
+		irq_set_msi_desc_off(irq_base, i, NULL);
+		clear_bit(pos + i, pp->msi_irq_in_use);
+		/* Disable corresponding interrupt on MSI interrupt controller */
+		res = ((pos + i) / 32) * 12;
+		bit = (pos + i) % 32;
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+		val &= ~(1 << bit);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+	}
+}
+
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
 	int res, bit, irq, pos0, pos1, i;
@@ -242,18 +259,25 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 	if (!irq)
 		goto no_valid_irq;
 
-	i = 0;
-	while (i < no_irqs) {
+	/*
+	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
+	 * descs so there is no need to allocate descs here. We can therefore
+	 * assume that if irq_find_mapping above returns non-zero, then the
+	 * descs are also successfully allocated.
+	 */
+
+	for (i = 0; i < no_irqs; i++) {
+		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
+			clear_irq_range(pp, irq, i, pos0);
+			goto no_valid_irq;
+		}
 		set_bit(pos0 + i, pp->msi_irq_in_use);
-		irq_alloc_descs((irq + i), (irq + i), 1, 0);
-		irq_set_msi_desc(irq + i, desc);
 		/*Enable corresponding interrupt in MSI interrupt controller */
 		res = ((pos0 + i) / 32) * 12;
 		bit = (pos0 + i) % 32;
 		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
 		val |= 1 << bit;
 		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
-		i++;
 	}
 
 	*pos = pos0;
@@ -266,7 +290,7 @@ no_valid_irq:
 
 static void clear_irq(unsigned int irq)
 {
-	int res, bit, val, pos;
+	unsigned int pos, nvec;
 	struct irq_desc *desc;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
@@ -281,18 +305,15 @@ static void clear_irq(unsigned int irq)
 		return;
 	}
 
+	/* undo what was done in assign_irq */
 	pos = data->hwirq;
+	nvec = 1 << msi->msi_attrib.multiple;
 
-	irq_free_desc(irq);
+	clear_irq_range(pp, irq, nvec, pos);
 
-	clear_bit(pos, pp->msi_irq_in_use);
-
-	/* Disable corresponding interrupt on MSI interrupt controller */
-	res = (pos / 32) * 12;
-	bit = pos % 32;
-	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
-	val &= ~(1 << bit);
-	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+	/* all irqs cleared; reset attributes */
+	msi->irq = 0;
+	msi->msi_attrib.multiple = 0;
 }
 
 static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
@@ -320,10 +341,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 	if (irq < 0)
 		return irq;
 
-	msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
-	msg_ctr |= msgvec << 4;
-	pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
-				msg_ctr);
+	/*
+	 * write_msi_msg() will update PCI_MSI_FLAGS so there is
+	 * no need to explicitly call pci_write_config_word().
+	 */
 	desc->msi_attrib.multiple = msgvec;
 
 	msg.address_lo = virt_to_phys((void *)pp->msi_data);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1fe2d6fb19d5..68311ec849ee 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -441,6 +441,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
 
 found:
 	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
+	pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
 	if (!offset || (total > 1 && !stride))
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3fcd67a16677..51bf0400a889 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -116,7 +116,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
 	return default_teardown_msi_irqs(dev);
 }
 
-void default_restore_msi_irqs(struct pci_dev *dev, int irq)
+static void default_restore_msi_irq(struct pci_dev *dev, int irq)
 {
 	struct msi_desc *entry;
 
@@ -134,9 +134,9 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
 		write_msi_msg(irq, &entry->msg);
 }
 
-void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq)
+void __weak arch_restore_msi_irqs(struct pci_dev *dev)
 {
-	return default_restore_msi_irqs(dev, irq);
+	return default_restore_msi_irqs(dev);
 }
 
 static void msi_set_enable(struct pci_dev *dev, int enable)
@@ -262,6 +262,15 @@ void unmask_msi_irq(struct irq_data *data)
 	msi_set_mask_bit(data, 0);
 }
 
+void default_restore_msi_irqs(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		default_restore_msi_irq(dev, entry->irq);
+	}
+}
+
 void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	BUG_ON(entry->dev->current_state != PCI_D0);
@@ -430,7 +439,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 
 	pci_intx_for_msi(dev, 0);
 	msi_set_enable(dev, 0);
-	arch_restore_msi_irqs(dev, dev->irq);
+	arch_restore_msi_irqs(dev);
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
 	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -455,8 +464,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
 	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
 
+	arch_restore_msi_irqs(dev);
 	list_for_each_entry(entry, &dev->msi_list, list) {
-		arch_restore_msi_irqs(dev, entry->irq);
 		msix_mask_irq(entry, entry->masked);
 	}
 
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..e0431f1af33b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -358,7 +358,7 @@ static void pci_acpi_cleanup(struct device *dev)
 
 static bool pci_acpi_bus_match(struct device *dev)
 {
-	return dev->bus == &pci_bus_type;
+	return dev_is_pci(dev);
 }
 
 static struct acpi_bus_type acpi_pci_bus = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9042fdbd7244..25f0bc659164 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
+#include <linux/kexec.h>
 #include "pci.h"
 
 struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 	int error, node;
 	struct drv_dev_and_id ddi = { drv, dev, id };
 
-	/* Execute driver initialization on node where the device's
-	   bus is attached to. This way the driver likely allocates
-	   its local memory on the right node without any need to
-	   change it. */
+	/*
+	 * Execute driver initialization on node where the device is
+	 * attached. This way the driver likely allocates its local memory
+	 * on the right node.
+	 */
 	node = dev_to_node(&dev->dev);
-	if (node >= 0) {
+
+	/*
+	 * On NUMA systems, we are likely to call a PF probe function using
+	 * work_on_cpu(). If that probe calls pci_enable_sriov() (which
+	 * adds the VF devices via pci_bus_add_device()), we may re-enter
+	 * this function to call the VF probe function. Calling
+	 * work_on_cpu() again will cause a lockdep warning. Since VFs are
+	 * always on the same node as the PF, we can work around this by
+	 * avoiding work_on_cpu() when we're already on the correct node.
+	 *
+	 * Preemption is enabled, so it's theoretically unsafe to use
+	 * numa_node_id(), but even if we run the probe function on the
+	 * wrong node, it should be functionally correct.
+	 */
+	if (node >= 0 && node != numa_node_id()) {
 		int cpu;
 
 		get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 		put_online_cpus();
 	} else
 		error = local_pci_probe(&ddi);
+
 	return error;
 }
 
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
 	pci_msi_shutdown(pci_dev);
 	pci_msix_shutdown(pci_dev);
 
+#ifdef CONFIG_KEXEC
 	/*
-	 * Turn off Bus Master bit on the device to tell it to not
-	 * continue to do DMA. Don't touch devices in D3cold or unknown states.
+	 * If this is a kexec reboot, turn off Bus Master bit on the
+	 * device to tell it to not continue to do DMA. Don't touch
+	 * devices in D3cold or unknown states.
+	 * If it is not a kexec reboot, firmware will hit the PCI
+	 * devices with big hammer and stop their DMA any way.
 	 */
-	if (pci_dev->current_state <= PCI_D3hot)
+	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
 		pci_clear_master(pci_dev);
+#endif
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index cf611ab2193a..4d6991794fa2 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
@@ -50,14 +50,37 @@ struct aer_hest_parse_info { | |||
50 | int firmware_first; | 50 | int firmware_first; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr) | ||
54 | { | ||
55 | if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT || | ||
56 | hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT || | ||
57 | hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE) | ||
58 | return 1; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
53 | static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) | 62 | static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) |
54 | { | 63 | { |
55 | struct aer_hest_parse_info *info = data; | 64 | struct aer_hest_parse_info *info = data; |
56 | struct acpi_hest_aer_common *p; | 65 | struct acpi_hest_aer_common *p; |
57 | int ff; | 66 | int ff; |
58 | 67 | ||
68 | if (!hest_source_is_pcie_aer(hest_hdr)) | ||
69 | return 0; | ||
70 | |||
59 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | 71 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); |
60 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | 72 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); |
73 | |||
74 | /* | ||
75 | * If no specific device is supplied, determine whether | ||
76 | * FIRMWARE_FIRST is set for *any* PCIe device. | ||
77 | */ | ||
78 | if (!info->pci_dev) { | ||
79 | info->firmware_first |= ff; | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | /* Otherwise, check the specific device */ | ||
61 | if (p->flags & ACPI_HEST_GLOBAL) { | 84 | if (p->flags & ACPI_HEST_GLOBAL) { |
62 | if (hest_match_type(hest_hdr, info->pci_dev)) | 85 | if (hest_match_type(hest_hdr, info->pci_dev)) |
63 | info->firmware_first = ff; | 86 | info->firmware_first = ff; |
@@ -97,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev) | |||
97 | 120 | ||
98 | static bool aer_firmware_first; | 121 | static bool aer_firmware_first; |
99 | 122 | ||
100 | static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data) | ||
101 | { | ||
102 | struct acpi_hest_aer_common *p; | ||
103 | |||
104 | if (aer_firmware_first) | ||
105 | return 0; | ||
106 | |||
107 | switch (hest_hdr->type) { | ||
108 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
109 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
110 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
111 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | ||
112 | aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
113 | default: | ||
114 | return 0; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /** | 123 | /** |
119 | * aer_acpi_firmware_first - Check if APEI should control AER. | 124 | * aer_acpi_firmware_first - Check if APEI should control AER. |
120 | */ | 125 | */ |
121 | bool aer_acpi_firmware_first(void) | 126 | bool aer_acpi_firmware_first(void) |
122 | { | 127 | { |
123 | static bool parsed = false; | 128 | static bool parsed = false; |
129 | struct aer_hest_parse_info info = { | ||
130 | .pci_dev = NULL, /* Check all PCIe devices */ | ||
131 | .firmware_first = 0, | ||
132 | }; | ||
124 | 133 | ||
125 | if (!parsed) { | 134 | if (!parsed) { |
126 | apei_hest_parse(aer_hest_parse_aff, NULL); | 135 | apei_hest_parse(aer_hest_parse, &info); |
136 | aer_firmware_first = info.firmware_first; | ||
127 | parsed = true; | 137 | parsed = true; |
128 | } | 138 | } |
129 | return aer_firmware_first; | 139 | return aer_firmware_first; |
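With the type check factored into hest_source_is_pcie_aer(), the same aer_hest_parse() callback serves both the global query above (pci_dev == NULL) and a per-device query. A sketch of the per-device side, using only the struct fields and calls visible in this hunk; the wrapper function itself is illustrative, not a copy of the tree:

static int example_device_firmware_first(struct pci_dev *dev)
{
	struct aer_hest_parse_info info = {
		.pci_dev	= dev,	/* match this device only */
		.firmware_first	= 0,
	};

	apei_hest_parse(aer_hest_parse, &info);
	return info.firmware_first;
}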
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 2c7c9f5f592c..34ff7026440c 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
@@ -124,6 +124,21 @@ static const char *aer_agent_string[] = { | |||
124 | "Transmitter ID" | 124 | "Transmitter ID" |
125 | }; | 125 | }; |
126 | 126 | ||
127 | static void __print_tlp_header(struct pci_dev *dev, | ||
128 | struct aer_header_log_regs *t) | ||
129 | { | ||
130 | unsigned char *tlp = (unsigned char *)&t; | ||
131 | |||
132 | dev_err(&dev->dev, " TLP Header:" | ||
133 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
134 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
135 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
136 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
137 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
138 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
139 | *(tlp + 13), *(tlp + 12)); | ||
140 | } | ||
141 | |||
127 | static void __aer_print_error(struct pci_dev *dev, | 142 | static void __aer_print_error(struct pci_dev *dev, |
128 | struct aer_err_info *info) | 143 | struct aer_err_info *info) |
129 | { | 144 | { |
@@ -153,48 +168,39 @@ static void __aer_print_error(struct pci_dev *dev, | |||
153 | 168 | ||
154 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | 169 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) |
155 | { | 170 | { |
171 | int layer, agent; | ||
156 | int id = ((dev->bus->number << 8) | dev->devfn); | 172 | int id = ((dev->bus->number << 8) | dev->devfn); |
157 | 173 | ||
158 | if (info->status == 0) { | 174 | if (!info->status) { |
159 | dev_err(&dev->dev, | 175 | dev_err(&dev->dev, |
160 | "PCIe Bus Error: severity=%s, type=Unaccessible, " | 176 | "PCIe Bus Error: severity=%s, type=Unaccessible, " |
161 | "id=%04x(Unregistered Agent ID)\n", | 177 | "id=%04x(Unregistered Agent ID)\n", |
162 | aer_error_severity_string[info->severity], id); | 178 | aer_error_severity_string[info->severity], id); |
163 | } else { | 179 | goto out; |
164 | int layer, agent; | 180 | } |
165 | 181 | ||
166 | layer = AER_GET_LAYER_ERROR(info->severity, info->status); | 182 | layer = AER_GET_LAYER_ERROR(info->severity, info->status); |
167 | agent = AER_GET_AGENT(info->severity, info->status); | 183 | agent = AER_GET_AGENT(info->severity, info->status); |
168 | 184 | ||
169 | dev_err(&dev->dev, | 185 | dev_err(&dev->dev, |
170 | "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", | 186 | "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", |
171 | aer_error_severity_string[info->severity], | 187 | aer_error_severity_string[info->severity], |
172 | aer_error_layer[layer], id, aer_agent_string[agent]); | 188 | aer_error_layer[layer], id, aer_agent_string[agent]); |
173 | 189 | ||
174 | dev_err(&dev->dev, | 190 | dev_err(&dev->dev, |
175 | " device [%04x:%04x] error status/mask=%08x/%08x\n", | 191 | " device [%04x:%04x] error status/mask=%08x/%08x\n", |
176 | dev->vendor, dev->device, | 192 | dev->vendor, dev->device, |
177 | info->status, info->mask); | 193 | info->status, info->mask); |
178 | 194 | ||
179 | __aer_print_error(dev, info); | 195 | __aer_print_error(dev, info); |
180 | |||
181 | if (info->tlp_header_valid) { | ||
182 | unsigned char *tlp = (unsigned char *) &info->tlp; | ||
183 | dev_err(&dev->dev, " TLP Header:" | ||
184 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
185 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
186 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
187 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
188 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
189 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
190 | *(tlp + 13), *(tlp + 12)); | ||
191 | } | ||
192 | } | ||
193 | 196 | ||
197 | if (info->tlp_header_valid) | ||
198 | __print_tlp_header(dev, &info->tlp); | ||
199 | |||
200 | out: | ||
194 | if (info->id && info->error_dev_num > 1 && info->id == id) | 201 | if (info->id && info->error_dev_num > 1 && info->id == id) |
195 | dev_err(&dev->dev, | 202 | dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id); |
196 | " Error of this Agent(%04x) is reported first\n", | 203 | |
197 | id); | ||
198 | trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), | 204 | trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), |
199 | info->severity); | 205 | info->severity); |
200 | } | 206 | } |
@@ -228,6 +234,7 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity, | |||
228 | const char **status_strs; | 234 | const char **status_strs; |
229 | 235 | ||
230 | aer_severity = cper_severity_to_aer(cper_severity); | 236 | aer_severity = cper_severity_to_aer(cper_severity); |
237 | |||
231 | if (aer_severity == AER_CORRECTABLE) { | 238 | if (aer_severity == AER_CORRECTABLE) { |
232 | status = aer->cor_status; | 239 | status = aer->cor_status; |
233 | mask = aer->cor_mask; | 240 | mask = aer->cor_mask; |
@@ -240,28 +247,22 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity, | |||
240 | status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); | 247 | status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); |
241 | tlp_header_valid = status & AER_LOG_TLP_MASKS; | 248 | tlp_header_valid = status & AER_LOG_TLP_MASKS; |
242 | } | 249 | } |
250 | |||
243 | layer = AER_GET_LAYER_ERROR(aer_severity, status); | 251 | layer = AER_GET_LAYER_ERROR(aer_severity, status); |
244 | agent = AER_GET_AGENT(aer_severity, status); | 252 | agent = AER_GET_AGENT(aer_severity, status); |
245 | dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", | 253 | |
246 | status, mask); | 254 | dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); |
247 | cper_print_bits("", status, status_strs, status_strs_size); | 255 | cper_print_bits("", status, status_strs, status_strs_size); |
248 | dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", | 256 | dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", |
249 | aer_error_layer[layer], aer_agent_string[agent]); | 257 | aer_error_layer[layer], aer_agent_string[agent]); |
258 | |||
250 | if (aer_severity != AER_CORRECTABLE) | 259 | if (aer_severity != AER_CORRECTABLE) |
251 | dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", | 260 | dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", |
252 | aer->uncor_severity); | 261 | aer->uncor_severity); |
253 | if (tlp_header_valid) { | 262 | |
254 | const unsigned char *tlp; | 263 | if (tlp_header_valid) |
255 | tlp = (const unsigned char *)&aer->header_log; | 264 | __print_tlp_header(dev, &aer->header_log); |
256 | dev_err(&dev->dev, "aer_tlp_header:" | 265 | |
257 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
258 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
259 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
260 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
261 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
262 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
263 | *(tlp + 13), *(tlp + 12)); | ||
264 | } | ||
265 | trace_aer_event(dev_name(&dev->dev), (status & ~mask), | 266 | trace_aer_event(dev_name(&dev->dev), (status & ~mask), |
266 | aer_severity); | 267 | aer_severity); |
267 | } | 268 | } |
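For reference, a small standalone sketch (userspace, made-up header contents) of the byte order the TLP dump is meant to produce: each of the four logged DWORDs is printed most-significant byte first, which on a little-endian host means bytes 3, 2, 1, 0 of each 32-bit word.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Arbitrary example contents of the four AER header log DWORDs. */
	uint32_t hdr[4] = { 0x4a003001, 0x03000010, 0x90000000, 0x00000000 };
	const unsigned char *tlp = (const unsigned char *)hdr;

	printf("TLP Header: %02x%02x%02x%02x %02x%02x%02x%02x"
	       " %02x%02x%02x%02x %02x%02x%02x%02x\n",
	       tlp[3], tlp[2], tlp[1], tlp[0],
	       tlp[7], tlp[6], tlp[5], tlp[4],
	       tlp[11], tlp[10], tlp[9], tlp[8],
	       tlp[15], tlp[14], tlp[13], tlp[12]);
	return 0;
}

Note that the cast in this sketch is applied to the header data itself, i.e. the bytes dumped are the logged TLP DWORDs rather than the address of a local pointer.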
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 0b6e76604068..ce9d9ae17bfd 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -554,7 +554,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) | |||
554 | if (pcie_ports_disabled) | 554 | if (pcie_ports_disabled) |
555 | return -ENODEV; | 555 | return -ENODEV; |
556 | 556 | ||
557 | new->driver.name = (char *)new->name; | 557 | new->driver.name = new->name; |
558 | new->driver.bus = &pcie_port_bus_type; | 558 | new->driver.bus = &pcie_port_bus_type; |
559 | new->driver.probe = pcie_port_probe_service; | 559 | new->driver.probe = pcie_port_probe_service; |
560 | new->driver.remove = pcie_port_remove_service; | 560 | new->driver.remove = pcie_port_remove_service; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 38e403dddf6e..12ec56c9a913 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1154,6 +1154,18 @@ static void pci_release_capabilities(struct pci_dev *dev) | |||
1154 | pci_free_cap_save_buffers(dev); | 1154 | pci_free_cap_save_buffers(dev); |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | static void pci_free_resources(struct pci_dev *dev) | ||
1158 | { | ||
1159 | int i; | ||
1160 | |||
1161 | pci_cleanup_rom(dev); | ||
1162 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
1163 | struct resource *res = dev->resource + i; | ||
1164 | if (res->parent) | ||
1165 | release_resource(res); | ||
1166 | } | ||
1167 | } | ||
1168 | |||
1157 | /** | 1169 | /** |
1158 | * pci_release_dev - free a pci device structure when all users of it are finished. | 1170 | * pci_release_dev - free a pci device structure when all users of it are finished. |
1159 | * @dev: device that's been disconnected | 1171 | * @dev: device that's been disconnected |
@@ -1163,9 +1175,14 @@ static void pci_release_capabilities(struct pci_dev *dev) | |||
1163 | */ | 1175 | */ |
1164 | static void pci_release_dev(struct device *dev) | 1176 | static void pci_release_dev(struct device *dev) |
1165 | { | 1177 | { |
1166 | struct pci_dev *pci_dev; | 1178 | struct pci_dev *pci_dev = to_pci_dev(dev); |
1179 | |||
1180 | down_write(&pci_bus_sem); | ||
1181 | list_del(&pci_dev->bus_list); | ||
1182 | up_write(&pci_bus_sem); | ||
1183 | |||
1184 | pci_free_resources(pci_dev); | ||
1167 | 1185 | ||
1168 | pci_dev = to_pci_dev(dev); | ||
1169 | pci_release_capabilities(pci_dev); | 1186 | pci_release_capabilities(pci_dev); |
1170 | pci_release_of_node(pci_dev); | 1187 | pci_release_of_node(pci_dev); |
1171 | pcibios_release_device(pci_dev); | 1188 | pcibios_release_device(pci_dev); |
@@ -1381,8 +1398,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
1381 | dev->match_driver = false; | 1398 | dev->match_driver = false; |
1382 | ret = device_add(&dev->dev); | 1399 | ret = device_add(&dev->dev); |
1383 | WARN_ON(ret < 0); | 1400 | WARN_ON(ret < 0); |
1384 | |||
1385 | pci_proc_attach_device(dev); | ||
1386 | } | 1401 | } |
1387 | 1402 | ||
1388 | struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) | 1403 | struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) |
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 1576851028db..f452148e6d55 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c | |||
@@ -3,20 +3,6 @@ | |||
3 | #include <linux/pci-aspm.h> | 3 | #include <linux/pci-aspm.h> |
4 | #include "pci.h" | 4 | #include "pci.h" |
5 | 5 | ||
6 | static void pci_free_resources(struct pci_dev *dev) | ||
7 | { | ||
8 | int i; | ||
9 | |||
10 | msi_remove_pci_irq_vectors(dev); | ||
11 | |||
12 | pci_cleanup_rom(dev); | ||
13 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
14 | struct resource *res = dev->resource + i; | ||
15 | if (res->parent) | ||
16 | release_resource(res); | ||
17 | } | ||
18 | } | ||
19 | |||
20 | static void pci_stop_dev(struct pci_dev *dev) | 6 | static void pci_stop_dev(struct pci_dev *dev) |
21 | { | 7 | { |
22 | pci_pme_active(dev, false); | 8 | pci_pme_active(dev, false); |
@@ -24,7 +10,7 @@ static void pci_stop_dev(struct pci_dev *dev) | |||
24 | if (dev->is_added) { | 10 | if (dev->is_added) { |
25 | pci_proc_detach_device(dev); | 11 | pci_proc_detach_device(dev); |
26 | pci_remove_sysfs_dev_files(dev); | 12 | pci_remove_sysfs_dev_files(dev); |
27 | device_del(&dev->dev); | 13 | device_release_driver(&dev->dev); |
28 | dev->is_added = 0; | 14 | dev->is_added = 0; |
29 | } | 15 | } |
30 | 16 | ||
@@ -34,11 +20,8 @@ static void pci_stop_dev(struct pci_dev *dev) | |||
34 | 20 | ||
35 | static void pci_destroy_dev(struct pci_dev *dev) | 21 | static void pci_destroy_dev(struct pci_dev *dev) |
36 | { | 22 | { |
37 | down_write(&pci_bus_sem); | 23 | device_del(&dev->dev); |
38 | list_del(&dev->bus_list); | ||
39 | up_write(&pci_bus_sem); | ||
40 | 24 | ||
41 | pci_free_resources(dev); | ||
42 | put_device(&dev->dev); | 25 | put_device(&dev->dev); |
43 | } | 26 | } |
44 | 27 | ||
@@ -126,7 +109,7 @@ void pci_stop_root_bus(struct pci_bus *bus) | |||
126 | pci_stop_bus_device(child); | 109 | pci_stop_bus_device(child); |
127 | 110 | ||
128 | /* stop the host bridge */ | 111 | /* stop the host bridge */ |
129 | device_del(&host_bridge->dev); | 112 | device_release_driver(&host_bridge->dev); |
130 | } | 113 | } |
131 | 114 | ||
132 | void pci_remove_root_bus(struct pci_bus *bus) | 115 | void pci_remove_root_bus(struct pci_bus *bus) |
@@ -145,5 +128,5 @@ void pci_remove_root_bus(struct pci_bus *bus) | |||
145 | host_bridge->bus = NULL; | 128 | host_bridge->bus = NULL; |
146 | 129 | ||
147 | /* remove the host bridge */ | 130 | /* remove the host bridge */ |
148 | put_device(&host_bridge->dev); | 131 | device_unregister(&host_bridge->dev); |
149 | } | 132 | } |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 219a4106480a..2e344a5581ae 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -538,7 +538,8 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
538 | struct pci_bus_region region; | 538 | struct pci_bus_region region; |
539 | unsigned long io_mask; | 539 | unsigned long io_mask; |
540 | u8 io_base_lo, io_limit_lo; | 540 | u8 io_base_lo, io_limit_lo; |
541 | u32 l, io_upper16; | 541 | u16 l; |
542 | u32 io_upper16; | ||
542 | 543 | ||
543 | io_mask = PCI_IO_RANGE_MASK; | 544 | io_mask = PCI_IO_RANGE_MASK; |
544 | if (bridge->io_window_1k) | 545 | if (bridge->io_window_1k) |
@@ -548,11 +549,10 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
548 | res = bus->resource[0]; | 549 | res = bus->resource[0]; |
549 | pcibios_resource_to_bus(bridge, ®ion, res); | 550 | pcibios_resource_to_bus(bridge, ®ion, res); |
550 | if (res->flags & IORESOURCE_IO) { | 551 | if (res->flags & IORESOURCE_IO) { |
551 | pci_read_config_dword(bridge, PCI_IO_BASE, &l); | 552 | pci_read_config_word(bridge, PCI_IO_BASE, &l); |
552 | l &= 0xffff0000; | ||
553 | io_base_lo = (region.start >> 8) & io_mask; | 553 | io_base_lo = (region.start >> 8) & io_mask; |
554 | io_limit_lo = (region.end >> 8) & io_mask; | 554 | io_limit_lo = (region.end >> 8) & io_mask; |
555 | l |= ((u32) io_limit_lo << 8) | io_base_lo; | 555 | l = ((u16) io_limit_lo << 8) | io_base_lo; |
556 | /* Set up upper 16 bits of I/O base/limit. */ | 556 | /* Set up upper 16 bits of I/O base/limit. */ |
557 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 557 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
558 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 558 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
@@ -564,7 +564,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
564 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ | 564 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ |
565 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); | 565 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); |
566 | /* Update lower 16 bits of I/O base/limit. */ | 566 | /* Update lower 16 bits of I/O base/limit. */ |
567 | pci_write_config_dword(bridge, PCI_IO_BASE, l); | 567 | pci_write_config_word(bridge, PCI_IO_BASE, l); |
568 | /* Update upper 16 bits of I/O base/limit. */ | 568 | /* Update upper 16 bits of I/O base/limit. */ |
569 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 569 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
570 | } | 570 | } |
@@ -665,21 +665,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) | |||
665 | 665 | ||
666 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | 666 | pci_read_config_word(bridge, PCI_IO_BASE, &io); |
667 | if (!io) { | 667 | if (!io) { |
668 | pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); | 668 | pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); |
669 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | 669 | pci_read_config_word(bridge, PCI_IO_BASE, &io); |
670 | pci_write_config_word(bridge, PCI_IO_BASE, 0x0); | 670 | pci_write_config_word(bridge, PCI_IO_BASE, 0x0); |
671 | } | 671 | } |
672 | if (io) | 672 | if (io) |
673 | b_res[0].flags |= IORESOURCE_IO; | 673 | b_res[0].flags |= IORESOURCE_IO; |
674 | |||
674 | /* DECchip 21050 pass 2 errata: the bridge may miss an address | 675 | /* DECchip 21050 pass 2 errata: the bridge may miss an address |
675 | disconnect boundary by one PCI data phase. | 676 | disconnect boundary by one PCI data phase. |
676 | Workaround: do not use prefetching on this device. */ | 677 | Workaround: do not use prefetching on this device. */ |
677 | if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) | 678 | if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) |
678 | return; | 679 | return; |
680 | |||
679 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | 681 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); |
680 | if (!pmem) { | 682 | if (!pmem) { |
681 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, | 683 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, |
682 | 0xfff0fff0); | 684 | 0xffe0fff0); |
683 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | 685 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); |
684 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); | 686 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); |
685 | } | 687 | } |
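A standalone sketch (userspace, example window) of the arithmetic pci_setup_bridge_io() performs on the now 16-bit PCI_IO_BASE/PCI_IO_LIMIT word; the local mask value is assumed here to mirror PCI_IO_RANGE_MASK rather than pulled from pci_regs.h:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0x2000, end = 0x2fff;	/* example I/O window */
	unsigned long io_mask = ~0x0fUL;	/* 4 KB granularity bridge */

	/* Address bits 15:12 of base and limit; low nibbles left clear. */
	uint8_t io_base_lo  = (start >> 8) & io_mask;
	uint8_t io_limit_lo = (end   >> 8) & io_mask;
	uint16_t l = ((uint16_t)io_limit_lo << 8) | io_base_lo;

	printf("PCI_IO_BASE word = 0x%04x\n", l);	/* prints 0x2020 */
	return 0;
}

Anything above 64 KB still goes through the separate PCI_IO_BASE_UPPER16 dword write, exactly as the hunk shows.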
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d78d28a733b1..5fd33dc1fe3a 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; | |||
198 | extern size_t vmcoreinfo_size; | 198 | extern size_t vmcoreinfo_size; |
199 | extern size_t vmcoreinfo_max_size; | 199 | extern size_t vmcoreinfo_max_size; |
200 | 200 | ||
201 | /* flag to track if kexec reboot is in progress */ | ||
202 | extern bool kexec_in_progress; | ||
203 | |||
201 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, | 204 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, |
202 | unsigned long long *crash_size, unsigned long long *crash_base); | 205 | unsigned long long *crash_size, unsigned long long *crash_base); |
203 | int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, | 206 | int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 009b02481436..92a2f991262a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -60,10 +60,10 @@ void arch_teardown_msi_irq(unsigned int irq); | |||
60 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); | 60 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
61 | void arch_teardown_msi_irqs(struct pci_dev *dev); | 61 | void arch_teardown_msi_irqs(struct pci_dev *dev); |
62 | int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); | 62 | int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); |
63 | void arch_restore_msi_irqs(struct pci_dev *dev, int irq); | 63 | void arch_restore_msi_irqs(struct pci_dev *dev); |
64 | 64 | ||
65 | void default_teardown_msi_irqs(struct pci_dev *dev); | 65 | void default_teardown_msi_irqs(struct pci_dev *dev); |
66 | void default_restore_msi_irqs(struct pci_dev *dev, int irq); | 66 | void default_restore_msi_irqs(struct pci_dev *dev); |
67 | u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); | 67 | u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
68 | u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); | 68 | u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); |
69 | 69 | ||
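Both restore hooks now take only the device, so an architecture with no special MSI state can simply forward to the default implementation. A sketch under the new prototypes, not taken from any particular arch:

#include <linux/msi.h>
#include <linux/pci.h>

void arch_restore_msi_irqs(struct pci_dev *dev)
{
	/* No per-IRQ argument any more: restore MSI/MSI-X state for the
	 * whole device in one go. */
	default_restore_msi_irqs(dev);
}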
diff --git a/include/linux/pci.h b/include/linux/pci.h index 1084a15175e0..eb8078aeadc8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -1567,65 +1567,65 @@ enum pci_fixup_pass { | |||
1567 | /* Anonymous variables would be nice... */ | 1567 | /* Anonymous variables would be nice... */ |
1568 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ | 1568 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ |
1569 | class_shift, hook) \ | 1569 | class_shift, hook) \ |
1570 | static const struct pci_fixup __pci_fixup_##name __used \ | 1570 | static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ |
1571 | __attribute__((__section__(#section), aligned((sizeof(void *))))) \ | 1571 | __attribute__((__section__(#section), aligned((sizeof(void *))))) \ |
1572 | = { vendor, device, class, class_shift, hook }; | 1572 | = { vendor, device, class, class_shift, hook }; |
1573 | 1573 | ||
1574 | #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ | 1574 | #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ |
1575 | class_shift, hook) \ | 1575 | class_shift, hook) \ |
1576 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 1576 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
1577 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1577 | hook, vendor, device, class, class_shift, hook) |
1578 | #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ | 1578 | #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ |
1579 | class_shift, hook) \ | 1579 | class_shift, hook) \ |
1580 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ | 1580 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ |
1581 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1581 | hook, vendor, device, class, class_shift, hook) |
1582 | #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ | 1582 | #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ |
1583 | class_shift, hook) \ | 1583 | class_shift, hook) \ |
1584 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ | 1584 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ |
1585 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1585 | hook, vendor, device, class, class_shift, hook) |
1586 | #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ | 1586 | #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ |
1587 | class_shift, hook) \ | 1587 | class_shift, hook) \ |
1588 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ | 1588 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ |
1589 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1589 | hook, vendor, device, class, class_shift, hook) |
1590 | #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ | 1590 | #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ |
1591 | class_shift, hook) \ | 1591 | class_shift, hook) \ |
1592 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1592 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1593 | resume##vendor##device##hook, vendor, device, class, \ | 1593 | resume##hook, vendor, device, class, \ |
1594 | class_shift, hook) | 1594 | class_shift, hook) |
1595 | #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ | 1595 | #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ |
1596 | class_shift, hook) \ | 1596 | class_shift, hook) \ |
1597 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1597 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1598 | resume_early##vendor##device##hook, vendor, device, \ | 1598 | resume_early##hook, vendor, device, \ |
1599 | class, class_shift, hook) | 1599 | class, class_shift, hook) |
1600 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ | 1600 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ |
1601 | class_shift, hook) \ | 1601 | class_shift, hook) \ |
1602 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1602 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1603 | suspend##vendor##device##hook, vendor, device, class, \ | 1603 | suspend##hook, vendor, device, class, \ |
1604 | class_shift, hook) | 1604 | class_shift, hook) |
1605 | 1605 | ||
1606 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ | 1606 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ |
1607 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 1607 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
1608 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1608 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1609 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ | 1609 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ |
1610 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ | 1610 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ |
1611 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1611 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1612 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ | 1612 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ |
1613 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ | 1613 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ |
1614 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1614 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1615 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ | 1615 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ |
1616 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ | 1616 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ |
1617 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1617 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1618 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ | 1618 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ |
1619 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1619 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1620 | resume##vendor##device##hook, vendor, device, \ | 1620 | resume##hook, vendor, device, \ |
1621 | PCI_ANY_ID, 0, hook) | 1621 | PCI_ANY_ID, 0, hook) |
1622 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ | 1622 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ |
1623 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1623 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1624 | resume_early##vendor##device##hook, vendor, device, \ | 1624 | resume_early##hook, vendor, device, \ |
1625 | PCI_ANY_ID, 0, hook) | 1625 | PCI_ANY_ID, 0, hook) |
1626 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ | 1626 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ |
1627 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1627 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1628 | suspend##vendor##device##hook, vendor, device, \ | 1628 | suspend##hook, vendor, device, \ |
1629 | PCI_ANY_ID, 0, hook) | 1629 | PCI_ANY_ID, 0, hook) |
1630 | 1630 | ||
1631 | #ifdef CONFIG_PCI_QUIRKS | 1631 | #ifdef CONFIG_PCI_QUIRKS |
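For reference, a quirk registered through one of these macros; the device IDs and the workaround below are placeholders, shown only to illustrate the declaration. With the __PASTE(..., __LINE__) form the emitted symbol is derived from the hook name plus the declaring line number, so it stays unique even if the same hook is registered more than once in a file.

#include <linux/pci.h>

static void quirk_example_disable_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;	/* hypothetical workaround */
	dev_info(&dev->dev, "example quirk applied\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example_disable_msi);	/* placeholder IDs */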
diff --git a/kernel/kexec.c b/kernel/kexec.c index 490afc03627e..d0d8fca54065 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; | |||
47 | size_t vmcoreinfo_size; | 47 | size_t vmcoreinfo_size; |
48 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); | 48 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); |
49 | 49 | ||
50 | /* Flag to indicate we are going to kexec a new kernel */ | ||
51 | bool kexec_in_progress = false; | ||
52 | |||
50 | /* Location of the reserved area for the crash kernel */ | 53 | /* Location of the reserved area for the crash kernel */ |
51 | struct resource crashk_res = { | 54 | struct resource crashk_res = { |
52 | .name = "Crash kernel", | 55 | .name = "Crash kernel", |
@@ -1675,6 +1678,7 @@ int kernel_kexec(void) | |||
1675 | } else | 1678 | } else |
1676 | #endif | 1679 | #endif |
1677 | { | 1680 | { |
1681 | kexec_in_progress = true; | ||
1678 | kernel_restart_prepare(NULL); | 1682 | kernel_restart_prepare(NULL); |
1679 | printk(KERN_EMERG "Starting new kernel\n"); | 1683 | printk(KERN_EMERG "Starting new kernel\n"); |
1680 | machine_shutdown(); | 1684 | machine_shutdown(); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 987293d03ebc..5690b8eabfbc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2840,19 +2840,6 @@ already_gone: | |||
2840 | return false; | 2840 | return false; |
2841 | } | 2841 | } |
2842 | 2842 | ||
2843 | static bool __flush_work(struct work_struct *work) | ||
2844 | { | ||
2845 | struct wq_barrier barr; | ||
2846 | |||
2847 | if (start_flush_work(work, &barr)) { | ||
2848 | wait_for_completion(&barr.done); | ||
2849 | destroy_work_on_stack(&barr.work); | ||
2850 | return true; | ||
2851 | } else { | ||
2852 | return false; | ||
2853 | } | ||
2854 | } | ||
2855 | |||
2856 | /** | 2843 | /** |
2857 | * flush_work - wait for a work to finish executing the last queueing instance | 2844 | * flush_work - wait for a work to finish executing the last queueing instance |
2858 | * @work: the work to flush | 2845 | * @work: the work to flush |
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work) | |||
2866 | */ | 2853 | */ |
2867 | bool flush_work(struct work_struct *work) | 2854 | bool flush_work(struct work_struct *work) |
2868 | { | 2855 | { |
2856 | struct wq_barrier barr; | ||
2857 | |||
2869 | lock_map_acquire(&work->lockdep_map); | 2858 | lock_map_acquire(&work->lockdep_map); |
2870 | lock_map_release(&work->lockdep_map); | 2859 | lock_map_release(&work->lockdep_map); |
2871 | 2860 | ||
2872 | return __flush_work(work); | 2861 | if (start_flush_work(work, &barr)) { |
2862 | wait_for_completion(&barr.done); | ||
2863 | destroy_work_on_stack(&barr.work); | ||
2864 | return true; | ||
2865 | } else { | ||
2866 | return false; | ||
2867 | } | ||
2873 | } | 2868 | } |
2874 | EXPORT_SYMBOL_GPL(flush_work); | 2869 | EXPORT_SYMBOL_GPL(flush_work); |
2875 | 2870 | ||
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) | |||
4814 | 4809 | ||
4815 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); | 4810 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); |
4816 | schedule_work_on(cpu, &wfc.work); | 4811 | schedule_work_on(cpu, &wfc.work); |
4817 | 4812 | flush_work(&wfc.work); | |
4818 | /* | ||
4819 | * The work item is on-stack and can't lead to deadlock through | ||
4820 | * flushing. Use __flush_work() to avoid spurious lockdep warnings | ||
4821 | * when work_on_cpu()s are nested. | ||
4822 | */ | ||
4823 | __flush_work(&wfc.work); | ||
4824 | |||
4825 | return wfc.ret; | 4813 | return wfc.ret; |
4826 | } | 4814 | } |
4827 | EXPORT_SYMBOL_GPL(work_on_cpu); | 4815 | EXPORT_SYMBOL_GPL(work_on_cpu); |
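With __flush_work() folded back into flush_work(), work_on_cpu() now uses the public API directly, as shown above. Typical use of work_on_cpu() itself, with a made-up callback for illustration:

#include <linux/workqueue.h>
#include <linux/smp.h>

/* Hypothetical helper; runs in process context on the requested CPU. */
static long example_on_cpu(void *arg)
{
	unsigned int *token = arg;

	return (long)raw_smp_processor_id() * 1000 + *token;
}

static long example_query(int cpu)
{
	unsigned int token = 42;

	/* May sleep; returns example_on_cpu()'s value once the work has run. */
	return work_on_cpu(cpu, example_on_cpu, &token);
}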