-rw-r--r--  Documentation/devicetree/bindings/pci/designware-pcie.txt |   2
-rw-r--r--  MAINTAINERS |  33
-rw-r--r--  arch/alpha/kernel/pci_iommu.c |   2
-rw-r--r--  arch/arm/common/it8152.c |   4
-rw-r--r--  arch/arm/mach-ixp4xx/common-pci.c |   6
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c |   2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c |  24
-rw-r--r--  arch/parisc/kernel/drivers.c |  22
-rw-r--r--  arch/sparc/kernel/iommu.c |   2
-rw-r--r--  arch/sparc/kernel/ioport.c |   5
-rw-r--r--  arch/x86/include/asm/pci.h |   2
-rw-r--r--  arch/x86/include/asm/x86_init.h |   2
-rw-r--r--  arch/x86/kernel/acpi/boot.c |   4
-rw-r--r--  arch/x86/kernel/x86_init.c |   4
-rw-r--r--  arch/x86/pci/xen.c |   2
-rw-r--r--  drivers/eisa/eisa-bus.c |   4
-rw-r--r--  drivers/pci/Makefile |   2
-rw-r--r--  drivers/pci/bus.c |   1
-rw-r--r--  drivers/pci/host/pci-imx6.c | 225
-rw-r--r--  drivers/pci/host/pci-mvebu.c |   5
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c |  12
-rw-r--r--  drivers/pci/host/pci-tegra.c |   2
-rw-r--r--  drivers/pci/host/pcie-designware.c |  59
-rw-r--r--  drivers/pci/hotplug/pciehp.h |  14
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c |  15
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c |  90
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 380
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c |   6
-rw-r--r--  drivers/pci/iov.c |   1
-rw-r--r--  drivers/pci/msi.c |  19
-rw-r--r--  drivers/pci/pci-acpi.c |   2
-rw-r--r--  drivers/pci/pci-driver.c |  38
-rw-r--r--  drivers/pci/pci.c | 102
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c |  48
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c |  95
-rw-r--r--  drivers/pci/pcie/portdrv_core.c |   2
-rw-r--r--  drivers/pci/probe.c |  23
-rw-r--r--  drivers/pci/remove.c |  25
-rw-r--r--  drivers/pci/setup-bus.c |  16
-rw-r--r--  drivers/pci/vc.c | 434
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c |  12
-rw-r--r--  include/linux/kexec.h |   3
-rw-r--r--  include/linux/msi.h |   4
-rw-r--r--  include/linux/pci.h |  45
-rw-r--r--  include/uapi/linux/pci_regs.h |  37
-rw-r--r--  kernel/kexec.c |   4
-rw-r--r--  kernel/workqueue.c |  32
47 files changed, 1196 insertions(+), 677 deletions(-)
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index d5d26d443693..d6fae13ff062 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -19,6 +19,8 @@ Required properties:
19 to define the mapping of the PCIe interface to interrupt 19 to define the mapping of the PCIe interface to interrupt
20 numbers. 20 numbers.
21- num-lanes: number of lanes to use 21- num-lanes: number of lanes to use
22
23Optional properties:
22- reset-gpio: gpio pin number of power good signal 24- reset-gpio: gpio pin number of power good signal
23 25
24Optional properties for fsl,imx6q-pcie 26Optional properties for fsl,imx6q-pcie
diff --git a/MAINTAINERS b/MAINTAINERS
index 8285ed4676b6..624e6516fdd3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6449,19 +6449,52 @@ F: drivers/pci/
6449F: include/linux/pci* 6449F: include/linux/pci*
6450F: arch/x86/pci/ 6450F: arch/x86/pci/
6451 6451
6452PCI DRIVER FOR IMX6
6453M: Richard Zhu <r65037@freescale.com>
6454M: Shawn Guo <shawn.guo@linaro.org>
6455L: linux-pci@vger.kernel.org
6456L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6457S: Maintained
6458F: drivers/pci/host/*imx6*
6459
6460PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
6461M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6462M: Jason Cooper <jason@lakedaemon.net>
6463L: linux-pci@vger.kernel.org
6464L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6465S: Maintained
6466F: drivers/pci/host/*mvebu*
6467
6452PCI DRIVER FOR NVIDIA TEGRA 6468PCI DRIVER FOR NVIDIA TEGRA
6453M: Thierry Reding <thierry.reding@gmail.com> 6469M: Thierry Reding <thierry.reding@gmail.com>
6454L: linux-tegra@vger.kernel.org 6470L: linux-tegra@vger.kernel.org
6471L: linux-pci@vger.kernel.org
6455S: Supported 6472S: Supported
6456F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt 6473F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
6457F: drivers/pci/host/pci-tegra.c 6474F: drivers/pci/host/pci-tegra.c
6458 6475
6476PCI DRIVER FOR RENESAS R-CAR
6477M: Simon Horman <horms@verge.net.au>
6478L: linux-pci@vger.kernel.org
6479L: linux-sh@vger.kernel.org
6480S: Maintained
6481F: drivers/pci/host/*rcar*
6482
6459PCI DRIVER FOR SAMSUNG EXYNOS 6483PCI DRIVER FOR SAMSUNG EXYNOS
6460M: Jingoo Han <jg1.han@samsung.com> 6484M: Jingoo Han <jg1.han@samsung.com>
6461L: linux-pci@vger.kernel.org 6485L: linux-pci@vger.kernel.org
6486L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6487L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
6462S: Maintained 6488S: Maintained
6463F: drivers/pci/host/pci-exynos.c 6489F: drivers/pci/host/pci-exynos.c
6464 6490
6491PCI DRIVER FOR SYNOPSIS DESIGNWARE
6492M: Mohit Kumar <mohit.kumar@st.com>
6493M: Jingoo Han <jg1.han@samsung.com>
6494L: linux-pci@vger.kernel.org
6495S: Maintained
6496F: drivers/pci/host/*designware*
6497
6465PCMCIA SUBSYSTEM 6498PCMCIA SUBSYSTEM
6466P: Linux PCMCIA Team 6499P: Linux PCMCIA Team
6467L: linux-pcmcia@lists.infradead.org 6500L: linux-pcmcia@lists.infradead.org
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index a21d0ab3b19e..eddee7720343 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -325,7 +325,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
325/* Helper for generic DMA-mapping functions. */ 325/* Helper for generic DMA-mapping functions. */
326static struct pci_dev *alpha_gendev_to_pci(struct device *dev) 326static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
327{ 327{
328 if (dev && dev->bus == &pci_bus_type) 328 if (dev && dev_is_pci(dev))
329 return to_pci_dev(dev); 329 return to_pci_dev(dev);
330 330
331 /* Assume that non-PCI devices asking for DMA are either ISA or EISA, 331 /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 001f4913799c..5114b68e99d5 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -257,7 +257,7 @@ static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s
257 */ 257 */
258static int it8152_pci_platform_notify(struct device *dev) 258static int it8152_pci_platform_notify(struct device *dev)
259{ 259{
260 if (dev->bus == &pci_bus_type) { 260 if (dev_is_pci(dev)) {
261 if (dev->dma_mask) 261 if (dev->dma_mask)
262 *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; 262 *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
263 dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; 263 dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
@@ -268,7 +268,7 @@ static int it8152_pci_platform_notify(struct device *dev)
268 268
269static int it8152_pci_platform_notify_remove(struct device *dev) 269static int it8152_pci_platform_notify_remove(struct device *dev)
270{ 270{
271 if (dev->bus == &pci_bus_type) 271 if (dev_is_pci(dev))
272 dmabounce_unregister_dev(dev); 272 dmabounce_unregister_dev(dev);
273 273
274 return 0; 274 return 0;
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 6d6bde3e15fa..200970d56f6d 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -326,7 +326,7 @@ static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s
326 */ 326 */
327static int ixp4xx_pci_platform_notify(struct device *dev) 327static int ixp4xx_pci_platform_notify(struct device *dev)
328{ 328{
329 if(dev->bus == &pci_bus_type) { 329 if (dev_is_pci(dev)) {
330 *dev->dma_mask = SZ_64M - 1; 330 *dev->dma_mask = SZ_64M - 1;
331 dev->coherent_dma_mask = SZ_64M - 1; 331 dev->coherent_dma_mask = SZ_64M - 1;
332 dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); 332 dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
@@ -336,9 +336,9 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
336 336
337static int ixp4xx_pci_platform_notify_remove(struct device *dev) 337static int ixp4xx_pci_platform_notify_remove(struct device *dev)
338{ 338{
339 if(dev->bus == &pci_bus_type) { 339 if (dev_is_pci(dev))
340 dmabounce_unregister_dev(dev); 340 dmabounce_unregister_dev(dev);
341 } 341
342 return 0; 342 return 0;
343} 343}
344 344
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 4c530a82fc46..8e858b593e4f 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -255,7 +255,7 @@ static u64 prefetch_spill_page;
255#endif 255#endif
256 256
257#ifdef CONFIG_PCI 257#ifdef CONFIG_PCI
258# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ 258# define GET_IOC(dev) ((dev_is_pci(dev)) \
259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) 259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
260#else 260#else
261# define GET_IOC(dev) NULL 261# define GET_IOC(dev) NULL
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 3290d6e00c31..d0853e8e8623 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -34,7 +34,7 @@
34 */ 34 */
35static int sn_dma_supported(struct device *dev, u64 mask) 35static int sn_dma_supported(struct device *dev, u64 mask)
36{ 36{
37 BUG_ON(dev->bus != &pci_bus_type); 37 BUG_ON(!dev_is_pci(dev));
38 38
39 if (mask < 0x7fffffff) 39 if (mask < 0x7fffffff)
40 return 0; 40 return 0;
@@ -50,7 +50,7 @@ static int sn_dma_supported(struct device *dev, u64 mask)
50 */ 50 */
51int sn_dma_set_mask(struct device *dev, u64 dma_mask) 51int sn_dma_set_mask(struct device *dev, u64 dma_mask)
52{ 52{
53 BUG_ON(dev->bus != &pci_bus_type); 53 BUG_ON(!dev_is_pci(dev));
54 54
55 if (!sn_dma_supported(dev, dma_mask)) 55 if (!sn_dma_supported(dev, dma_mask))
56 return 0; 56 return 0;
@@ -85,7 +85,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
85 struct pci_dev *pdev = to_pci_dev(dev); 85 struct pci_dev *pdev = to_pci_dev(dev);
86 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 86 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
87 87
88 BUG_ON(dev->bus != &pci_bus_type); 88 BUG_ON(!dev_is_pci(dev));
89 89
90 /* 90 /*
91 * Allocate the memory. 91 * Allocate the memory.
@@ -143,7 +143,7 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
143 struct pci_dev *pdev = to_pci_dev(dev); 143 struct pci_dev *pdev = to_pci_dev(dev);
144 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 144 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
145 145
146 BUG_ON(dev->bus != &pci_bus_type); 146 BUG_ON(!dev_is_pci(dev));
147 147
148 provider->dma_unmap(pdev, dma_handle, 0); 148 provider->dma_unmap(pdev, dma_handle, 0);
149 free_pages((unsigned long)cpu_addr, get_order(size)); 149 free_pages((unsigned long)cpu_addr, get_order(size));
@@ -187,7 +187,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
187 187
188 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); 188 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
189 189
190 BUG_ON(dev->bus != &pci_bus_type); 190 BUG_ON(!dev_is_pci(dev));
191 191
192 phys_addr = __pa(cpu_addr); 192 phys_addr = __pa(cpu_addr);
193 if (dmabarr) 193 if (dmabarr)
@@ -223,7 +223,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
223 struct pci_dev *pdev = to_pci_dev(dev); 223 struct pci_dev *pdev = to_pci_dev(dev);
224 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 224 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
225 225
226 BUG_ON(dev->bus != &pci_bus_type); 226 BUG_ON(!dev_is_pci(dev));
227 227
228 provider->dma_unmap(pdev, dma_addr, dir); 228 provider->dma_unmap(pdev, dma_addr, dir);
229} 229}
@@ -247,7 +247,7 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
247 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 247 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
248 struct scatterlist *sg; 248 struct scatterlist *sg;
249 249
250 BUG_ON(dev->bus != &pci_bus_type); 250 BUG_ON(!dev_is_pci(dev));
251 251
252 for_each_sg(sgl, sg, nhwentries, i) { 252 for_each_sg(sgl, sg, nhwentries, i) {
253 provider->dma_unmap(pdev, sg->dma_address, dir); 253 provider->dma_unmap(pdev, sg->dma_address, dir);
@@ -284,7 +284,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
284 284
285 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); 285 dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
286 286
287 BUG_ON(dev->bus != &pci_bus_type); 287 BUG_ON(!dev_is_pci(dev));
288 288
289 /* 289 /*
290 * Setup a DMA address for each entry in the scatterlist. 290 * Setup a DMA address for each entry in the scatterlist.
@@ -323,26 +323,26 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
323static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 323static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
324 size_t size, enum dma_data_direction dir) 324 size_t size, enum dma_data_direction dir)
325{ 325{
326 BUG_ON(dev->bus != &pci_bus_type); 326 BUG_ON(!dev_is_pci(dev));
327} 327}
328 328
329static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 329static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
330 size_t size, 330 size_t size,
331 enum dma_data_direction dir) 331 enum dma_data_direction dir)
332{ 332{
333 BUG_ON(dev->bus != &pci_bus_type); 333 BUG_ON(!dev_is_pci(dev));
334} 334}
335 335
336static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 336static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
337 int nelems, enum dma_data_direction dir) 337 int nelems, enum dma_data_direction dir)
338{ 338{
339 BUG_ON(dev->bus != &pci_bus_type); 339 BUG_ON(!dev_is_pci(dev));
340} 340}
341 341
342static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 342static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
343 int nelems, enum dma_data_direction dir) 343 int nelems, enum dma_data_direction dir)
344{ 344{
345 BUG_ON(dev->bus != &pci_bus_type); 345 BUG_ON(!dev_is_pci(dev));
346} 346}
347 347
348static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 348static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 14285caec71a..dba508fe1683 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -282,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type)
282 return NULL; 282 return NULL;
283} 283}
284 284
285#ifdef CONFIG_PCI
286static inline int is_pci_dev(struct device *dev)
287{
288 return dev->bus == &pci_bus_type;
289}
290#else
291static inline int is_pci_dev(struct device *dev)
292{
293 return 0;
294}
295#endif
296
297/* 285/*
298 * get_node_path fills in @path with the firmware path to the device. 286 * get_node_path fills in @path with the firmware path to the device.
299 * Note that if @node is a parisc device, we don't fill in the 'mod' field. 287 * Note that if @node is a parisc device, we don't fill in the 'mod' field.
@@ -306,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
306 int i = 5; 294 int i = 5;
307 memset(&path->bc, -1, 6); 295 memset(&path->bc, -1, 6);
308 296
309 if (is_pci_dev(dev)) { 297 if (dev_is_pci(dev)) {
310 unsigned int devfn = to_pci_dev(dev)->devfn; 298 unsigned int devfn = to_pci_dev(dev)->devfn;
311 path->mod = PCI_FUNC(devfn); 299 path->mod = PCI_FUNC(devfn);
312 path->bc[i--] = PCI_SLOT(devfn); 300 path->bc[i--] = PCI_SLOT(devfn);
@@ -314,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
314 } 302 }
315 303
316 while (dev != &root) { 304 while (dev != &root) {
317 if (is_pci_dev(dev)) { 305 if (dev_is_pci(dev)) {
318 unsigned int devfn = to_pci_dev(dev)->devfn; 306 unsigned int devfn = to_pci_dev(dev)->devfn;
319 path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5); 307 path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
320 } else if (dev->bus == &parisc_bus_type) { 308 } else if (dev->bus == &parisc_bus_type) {
@@ -695,7 +683,7 @@ static int check_parent(struct device * dev, void * data)
695 if (dev->bus == &parisc_bus_type) { 683 if (dev->bus == &parisc_bus_type) {
696 if (match_parisc_device(dev, d->index, d->modpath)) 684 if (match_parisc_device(dev, d->index, d->modpath))
697 d->dev = dev; 685 d->dev = dev;
698 } else if (is_pci_dev(dev)) { 686 } else if (dev_is_pci(dev)) {
699 if (match_pci_device(dev, d->index, d->modpath)) 687 if (match_pci_device(dev, d->index, d->modpath))
700 d->dev = dev; 688 d->dev = dev;
701 } else if (dev->bus == NULL) { 689 } else if (dev->bus == NULL) {
@@ -753,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath)
753 if (!parent) 741 if (!parent)
754 return NULL; 742 return NULL;
755 } 743 }
756 if (is_pci_dev(parent)) /* pci devices already parse MOD */ 744 if (dev_is_pci(parent)) /* pci devices already parse MOD */
757 return parent; 745 return parent;
758 else 746 else
759 return parse_tree_node(parent, 6, modpath); 747 return parse_tree_node(parent, 6, modpath);
@@ -772,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path)
772 padev = to_parisc_device(dev); 760 padev = to_parisc_device(dev);
773 get_node_path(dev->parent, path); 761 get_node_path(dev->parent, path);
774 path->mod = padev->hw_path; 762 path->mod = padev->hw_path;
775 } else if (is_pci_dev(dev)) { 763 } else if (dev_is_pci(dev)) {
776 get_node_path(dev, path); 764 get_node_path(dev, path);
777 } 765 }
778} 766}
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed141aac7..76663b019eb5 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
854 return 1; 854 return 1;
855 855
856#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
857 if (dev->bus == &pci_bus_type) 857 if (dev_is_pci(dev))
858 return pci64_dma_supported(to_pci_dev(dev), device_mask); 858 return pci64_dma_supported(to_pci_dev(dev), device_mask);
859#endif 859#endif
860 860
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468de9b2..e7e215dfa866 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
666 */ 666 */
667int dma_supported(struct device *dev, u64 mask) 667int dma_supported(struct device *dev, u64 mask)
668{ 668{
669#ifdef CONFIG_PCI 669 if (dev_is_pci(dev))
670 if (dev->bus == &pci_bus_type)
671 return 1; 670 return 1;
672#endif 671
673 return 0; 672 return 0;
674} 673}
675EXPORT_SYMBOL(dma_supported); 674EXPORT_SYMBOL(dma_supported);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 947b5c417e83..0de52c5bf9a2 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -104,7 +104,7 @@ extern void pci_iommu_alloc(void);
104struct msi_desc; 104struct msi_desc;
105int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 105int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
106void native_teardown_msi_irq(unsigned int irq); 106void native_teardown_msi_irq(unsigned int irq);
107void native_restore_msi_irqs(struct pci_dev *dev, int irq); 107void native_restore_msi_irqs(struct pci_dev *dev);
108int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, 108int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
109 unsigned int irq_base, unsigned int irq_offset); 109 unsigned int irq_base, unsigned int irq_offset);
110#else 110#else
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f1be11e43d2..e45e4da96bf1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,7 +181,7 @@ struct x86_msi_ops {
181 u8 hpet_id); 181 u8 hpet_id);
182 void (*teardown_msi_irq)(unsigned int irq); 182 void (*teardown_msi_irq)(unsigned int irq);
183 void (*teardown_msi_irqs)(struct pci_dev *dev); 183 void (*teardown_msi_irqs)(struct pci_dev *dev);
184 void (*restore_msi_irqs)(struct pci_dev *dev, int irq); 184 void (*restore_msi_irqs)(struct pci_dev *dev);
185 int (*setup_hpet_msi)(unsigned int irq, unsigned int id); 185 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
186 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); 186 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
187 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); 187 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6c0b43bd024b..d359d0fffa50 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1034,9 +1034,7 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
1034 1034
1035 if (!acpi_ioapic) 1035 if (!acpi_ioapic)
1036 return 0; 1036 return 0;
1037 if (!dev) 1037 if (!dev || !dev_is_pci(dev))
1038 return 0;
1039 if (dev->bus != &pci_bus_type)
1040 return 0; 1038 return 0;
1041 1039
1042 pdev = to_pci_dev(dev); 1040 pdev = to_pci_dev(dev);
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 021783b1f46a..e48b674639cc 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -136,9 +136,9 @@ void arch_teardown_msi_irq(unsigned int irq)
136 x86_msi.teardown_msi_irq(irq); 136 x86_msi.teardown_msi_irq(irq);
137} 137}
138 138
139void arch_restore_msi_irqs(struct pci_dev *dev, int irq) 139void arch_restore_msi_irqs(struct pci_dev *dev)
140{ 140{
141 x86_msi.restore_msi_irqs(dev, irq); 141 x86_msi.restore_msi_irqs(dev);
142} 142}
143u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 143u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
144{ 144{
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 5eee4959785d..103e702ec5a7 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -337,7 +337,7 @@ out:
337 return ret; 337 return ret;
338} 338}
339 339
340static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq) 340static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
341{ 341{
342 int ret = 0; 342 int ret = 0;
343 343
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 272a3ec35957..8842cde69177 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -232,8 +232,10 @@ static int __init eisa_init_device(struct eisa_root_device *root,
232static int __init eisa_register_device(struct eisa_device *edev) 232static int __init eisa_register_device(struct eisa_device *edev)
233{ 233{
234 int rc = device_register(&edev->dev); 234 int rc = device_register(&edev->dev);
235 if (rc) 235 if (rc) {
236 put_device(&edev->dev);
236 return rc; 237 return rc;
238 }
237 239
238 rc = device_create_file(&edev->dev, &dev_attr_signature); 240 rc = device_create_file(&edev->dev, &dev_attr_signature);
239 if (rc) 241 if (rc)
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 6ebf5bf8e7a7..17d2b07ee67c 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ 5obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
7 irq.o vpd.o setup-bus.o 7 irq.o vpd.o setup-bus.o vc.o
8obj-$(CONFIG_PROC_FS) += proc.o 8obj-$(CONFIG_PROC_FS) += proc.o
9obj-$(CONFIG_SYSFS) += slot.o 9obj-$(CONFIG_SYSFS) += slot.o
10 10
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index fc1b74013743..a037d81f21ed 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -176,6 +176,7 @@ int pci_bus_add_device(struct pci_dev *dev)
176 */ 176 */
177 pci_fixup_device(pci_fixup_final, dev); 177 pci_fixup_device(pci_fixup_final, dev);
178 pci_create_sysfs_dev_files(dev); 178 pci_create_sysfs_dev_files(dev);
179 pci_proc_attach_device(dev);
179 180
180 dev->match_driver = true; 181 dev->match_driver = true;
181 retval = device_attach(&dev->dev); 182 retval = device_attach(&dev->dev);
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index bd70af8f31ac..e8663a8c3406 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -44,10 +44,18 @@ struct imx6_pcie {
44 void __iomem *mem_base; 44 void __iomem *mem_base;
45}; 45};
46 46
47/* PCIe Root Complex registers (memory-mapped) */
48#define PCIE_RC_LCR 0x7c
49#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
50#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
51#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
52
47/* PCIe Port Logic registers (memory-mapped) */ 53/* PCIe Port Logic registers (memory-mapped) */
48#define PL_OFFSET 0x700 54#define PL_OFFSET 0x700
49#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) 55#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
50#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) 56#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
57#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
58#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
51 59
52#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 60#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
53#define PCIE_PHY_CTRL_DATA_LOC 0 61#define PCIE_PHY_CTRL_DATA_LOC 0
@@ -59,6 +67,9 @@ struct imx6_pcie {
59#define PCIE_PHY_STAT (PL_OFFSET + 0x110) 67#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
60#define PCIE_PHY_STAT_ACK_LOC 16 68#define PCIE_PHY_STAT_ACK_LOC 16
61 69
70#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
71#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
72
62/* PHY registers (not memory-mapped) */ 73/* PHY registers (not memory-mapped) */
63#define PCIE_PHY_RX_ASIC_OUT 0x100D 74#define PCIE_PHY_RX_ASIC_OUT 0x100D
64 75
@@ -209,15 +220,9 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
209 220
210 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 221 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
211 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); 222 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
212 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
213 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
214 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 223 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
215 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); 224 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
216 225
217 gpio_set_value(imx6_pcie->reset_gpio, 0);
218 msleep(100);
219 gpio_set_value(imx6_pcie->reset_gpio, 1);
220
221 return 0; 226 return 0;
222} 227}
223 228
@@ -261,6 +266,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
261 /* allow the clocks to stabilize */ 266 /* allow the clocks to stabilize */
262 usleep_range(200, 500); 267 usleep_range(200, 500);
263 268
269 /* Some boards don't have PCIe reset GPIO. */
270 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
271 gpio_set_value(imx6_pcie->reset_gpio, 0);
272 msleep(100);
273 gpio_set_value(imx6_pcie->reset_gpio, 1);
274 }
264 return 0; 275 return 0;
265 276
266err_pcie_axi: 277err_pcie_axi:
@@ -299,11 +310,90 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
299 IMX6Q_GPR8_TX_SWING_LOW, 127 << 25); 310 IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
300} 311}
301 312
302static void imx6_pcie_host_init(struct pcie_port *pp) 313static int imx6_pcie_wait_for_link(struct pcie_port *pp)
314{
315 int count = 200;
316
317 while (!dw_pcie_link_up(pp)) {
318 usleep_range(100, 1000);
319 if (--count)
320 continue;
321
322 dev_err(pp->dev, "phy link never came up\n");
323 dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
324 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
325 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
326 return -EINVAL;
327 }
328
329 return 0;
330}
331
332static int imx6_pcie_start_link(struct pcie_port *pp)
303{ 333{
304 int count = 0;
305 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); 334 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
335 uint32_t tmp;
336 int ret, count;
306 337
338 /*
339 * Force Gen1 operation when starting the link. In case the link is
340 * started in Gen2 mode, there is a possibility the devices on the
341 * bus will not be detected at all. This happens with PCIe switches.
342 */
343 tmp = readl(pp->dbi_base + PCIE_RC_LCR);
344 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
345 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
346 writel(tmp, pp->dbi_base + PCIE_RC_LCR);
347
348 /* Start LTSSM. */
349 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
350 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
351
352 ret = imx6_pcie_wait_for_link(pp);
353 if (ret)
354 return ret;
355
356 /* Allow Gen2 mode after the link is up. */
357 tmp = readl(pp->dbi_base + PCIE_RC_LCR);
358 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
359 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
360 writel(tmp, pp->dbi_base + PCIE_RC_LCR);
361
362 /*
363 * Start Directed Speed Change so the best possible speed both link
364 * partners support can be negotiated.
365 */
366 tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
367 tmp |= PORT_LOGIC_SPEED_CHANGE;
368 writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
369
370 count = 200;
371 while (count--) {
372 tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
373 /* Test if the speed change finished. */
374 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
375 break;
376 usleep_range(100, 1000);
377 }
378
379 /* Make sure link training is finished as well! */
380 if (count)
381 ret = imx6_pcie_wait_for_link(pp);
382 else
383 ret = -EINVAL;
384
385 if (ret) {
386 dev_err(pp->dev, "Failed to bring link up!\n");
387 } else {
388 tmp = readl(pp->dbi_base + 0x80);
389 dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
390 }
391
392 return ret;
393}
394
395static void imx6_pcie_host_init(struct pcie_port *pp)
396{
307 imx6_pcie_assert_core_reset(pp); 397 imx6_pcie_assert_core_reset(pp);
308 398
309 imx6_pcie_init_phy(pp); 399 imx6_pcie_init_phy(pp);
@@ -312,33 +402,41 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
312 402
313 dw_pcie_setup_rc(pp); 403 dw_pcie_setup_rc(pp);
314 404
315 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, 405 imx6_pcie_start_link(pp);
316 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); 406}
317 407
318 while (!dw_pcie_link_up(pp)) { 408static void imx6_pcie_reset_phy(struct pcie_port *pp)
319 usleep_range(100, 1000); 409{
320 count++; 410 uint32_t temp;
321 if (count >= 200) { 411
322 dev_err(pp->dev, "phy link never came up\n"); 412 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
323 dev_dbg(pp->dev, 413 temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
324 "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", 414 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
325 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), 415 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
326 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); 416
327 break; 417 usleep_range(2000, 3000);
328 }
329 }
330 418
331 return; 419 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
420 temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
421 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
422 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
332} 423}
333 424
334static int imx6_pcie_link_up(struct pcie_port *pp) 425static int imx6_pcie_link_up(struct pcie_port *pp)
335{ 426{
336 u32 rc, ltssm, rx_valid, temp; 427 u32 rc, ltssm, rx_valid;
337 428
338 /* link is debug bit 36, debug register 1 starts at bit 32 */ 429 /*
339 rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32)); 430 * Test if the PHY reports that the link is up and also that
340 if (rc) 431 * the link training finished. It might happen that the PHY
341 return -EAGAIN; 432 * reports the link is already up, but the link training bit
433 * is still set, so make sure to check the training is done
434 * as well here.
435 */
436 rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
437 if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
438 !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
439 return 1;
342 440
343 /* 441 /*
344 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. 442 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
@@ -358,21 +456,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
358 456
359 dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n"); 457 dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
360 458
361 pcie_phy_read(pp->dbi_base, 459 imx6_pcie_reset_phy(pp);
362 PHY_RX_OVRD_IN_LO, &temp);
363 temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
364 | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
365 pcie_phy_write(pp->dbi_base,
366 PHY_RX_OVRD_IN_LO, temp);
367
368 usleep_range(2000, 3000);
369
370 pcie_phy_read(pp->dbi_base,
371 PHY_RX_OVRD_IN_LO, &temp);
372 temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
373 | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
374 pcie_phy_write(pp->dbi_base,
375 PHY_RX_OVRD_IN_LO, temp);
376 460
377 return 0; 461 return 0;
378} 462}
@@ -426,30 +510,19 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
426 "imprecise external abort"); 510 "imprecise external abort");
427 511
428 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 512 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
429 if (!dbi_base) {
430 dev_err(&pdev->dev, "dbi_base memory resource not found\n");
431 return -ENODEV;
432 }
433
434 pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); 513 pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
435 if (IS_ERR(pp->dbi_base)) { 514 if (IS_ERR(pp->dbi_base))
436 ret = PTR_ERR(pp->dbi_base); 515 return PTR_ERR(pp->dbi_base);
437 goto err;
438 }
439 516
440 /* Fetch GPIOs */ 517 /* Fetch GPIOs */
441 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); 518 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
442 if (!gpio_is_valid(imx6_pcie->reset_gpio)) { 519 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
443 dev_err(&pdev->dev, "no reset-gpio defined\n"); 520 ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
444 ret = -ENODEV; 521 GPIOF_OUT_INIT_LOW, "PCIe reset");
445 } 522 if (ret) {
446 ret = devm_gpio_request_one(&pdev->dev, 523 dev_err(&pdev->dev, "unable to get reset gpio\n");
447 imx6_pcie->reset_gpio, 524 return ret;
448 GPIOF_OUT_INIT_LOW, 525 }
449 "PCIe reset");
450 if (ret) {
451 dev_err(&pdev->dev, "unable to get reset gpio\n");
452 goto err;
453 } 526 }
454 527
455 imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); 528 imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
@@ -460,7 +533,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
460 "PCIe power enable"); 533 "PCIe power enable");
461 if (ret) { 534 if (ret) {
462 dev_err(&pdev->dev, "unable to get power-on gpio\n"); 535 dev_err(&pdev->dev, "unable to get power-on gpio\n");
463 goto err; 536 return ret;
464 } 537 }
465 } 538 }
466 539
@@ -472,7 +545,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
472 "PCIe wake up"); 545 "PCIe wake up");
473 if (ret) { 546 if (ret) {
474 dev_err(&pdev->dev, "unable to get wake-up gpio\n"); 547 dev_err(&pdev->dev, "unable to get wake-up gpio\n");
475 goto err; 548 return ret;
476 } 549 }
477 } 550 }
478 551
@@ -484,7 +557,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
484 "PCIe disable endpoint"); 557 "PCIe disable endpoint");
485 if (ret) { 558 if (ret) {
486 dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); 559 dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
487 goto err; 560 return ret;
488 } 561 }
489 } 562 }
490 563
@@ -493,32 +566,28 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
493 if (IS_ERR(imx6_pcie->lvds_gate)) { 566 if (IS_ERR(imx6_pcie->lvds_gate)) {
494 dev_err(&pdev->dev, 567 dev_err(&pdev->dev,
495 "lvds_gate clock select missing or invalid\n"); 568 "lvds_gate clock select missing or invalid\n");
496 ret = PTR_ERR(imx6_pcie->lvds_gate); 569 return PTR_ERR(imx6_pcie->lvds_gate);
497 goto err;
498 } 570 }
499 571
500 imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m"); 572 imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
501 if (IS_ERR(imx6_pcie->sata_ref_100m)) { 573 if (IS_ERR(imx6_pcie->sata_ref_100m)) {
502 dev_err(&pdev->dev, 574 dev_err(&pdev->dev,
503 "sata_ref_100m clock source missing or invalid\n"); 575 "sata_ref_100m clock source missing or invalid\n");
504 ret = PTR_ERR(imx6_pcie->sata_ref_100m); 576 return PTR_ERR(imx6_pcie->sata_ref_100m);
505 goto err;
506 } 577 }
507 578
508 imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); 579 imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
509 if (IS_ERR(imx6_pcie->pcie_ref_125m)) { 580 if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
510 dev_err(&pdev->dev, 581 dev_err(&pdev->dev,
511 "pcie_ref_125m clock source missing or invalid\n"); 582 "pcie_ref_125m clock source missing or invalid\n");
512 ret = PTR_ERR(imx6_pcie->pcie_ref_125m); 583 return PTR_ERR(imx6_pcie->pcie_ref_125m);
513 goto err;
514 } 584 }
515 585
516 imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); 586 imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
517 if (IS_ERR(imx6_pcie->pcie_axi)) { 587 if (IS_ERR(imx6_pcie->pcie_axi)) {
518 dev_err(&pdev->dev, 588 dev_err(&pdev->dev,
519 "pcie_axi clock source missing or invalid\n"); 589 "pcie_axi clock source missing or invalid\n");
520 ret = PTR_ERR(imx6_pcie->pcie_axi); 590 return PTR_ERR(imx6_pcie->pcie_axi);
521 goto err;
522 } 591 }
523 592
524 /* Grab GPR config register range */ 593 /* Grab GPR config register range */
@@ -526,19 +595,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
526 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); 595 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
527 if (IS_ERR(imx6_pcie->iomuxc_gpr)) { 596 if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
528 dev_err(&pdev->dev, "unable to find iomuxc registers\n"); 597 dev_err(&pdev->dev, "unable to find iomuxc registers\n");
529 ret = PTR_ERR(imx6_pcie->iomuxc_gpr); 598 return PTR_ERR(imx6_pcie->iomuxc_gpr);
530 goto err;
531 } 599 }
532 600
533 ret = imx6_add_pcie_port(pp, pdev); 601 ret = imx6_add_pcie_port(pp, pdev);
534 if (ret < 0) 602 if (ret < 0)
535 goto err; 603 return ret;
536 604
537 platform_set_drvdata(pdev, imx6_pcie); 605 platform_set_drvdata(pdev, imx6_pcie);
538 return 0; 606 return 0;
539
540err:
541 return ret;
542} 607}
543 608
544static const struct of_device_id imx6_pcie_of_match[] = { 609static const struct of_device_id imx6_pcie_of_match[] = {
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index afd2af04980c..bedc0b1562b9 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -463,6 +463,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
463 *value = 0; 463 *value = 0;
464 break; 464 break;
465 465
466 case PCI_INTERRUPT_LINE:
467 /* LINE PIN MIN_GNT MAX_LAT */
468 *value = 0;
469 break;
470
466 default: 471 default:
467 *value = 0xffffffff; 472 *value = 0xffffffff;
468 return PCIBIOS_BAD_REGISTER_NUMBER; 473 return PCIBIOS_BAD_REGISTER_NUMBER;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index cbaa5c4397e3..ceec147baec3 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/pm_runtime.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21 22
22/* AHB-PCI Bridge PCI communication registers */ 23/* AHB-PCI Bridge PCI communication registers */
@@ -77,6 +78,7 @@
77#define RCAR_PCI_NR_CONTROLLERS 3 78#define RCAR_PCI_NR_CONTROLLERS 3
78 79
79struct rcar_pci_priv { 80struct rcar_pci_priv {
81 struct device *dev;
80 void __iomem *reg; 82 void __iomem *reg;
81 struct resource io_res; 83 struct resource io_res;
82 struct resource mem_res; 84 struct resource mem_res;
@@ -169,8 +171,11 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
169 void __iomem *reg = priv->reg; 171 void __iomem *reg = priv->reg;
170 u32 val; 172 u32 val;
171 173
174 pm_runtime_enable(priv->dev);
175 pm_runtime_get_sync(priv->dev);
176
172 val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); 177 val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
173 pr_info("PCI: bus%u revision %x\n", sys->busnr, val); 178 dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
174 179
175 /* Disable Direct Power Down State and assert reset */ 180 /* Disable Direct Power Down State and assert reset */
176 val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; 181 val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@@ -276,8 +281,8 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
276 281
277 cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 282 cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
278 reg = devm_ioremap_resource(&pdev->dev, cfg_res); 283 reg = devm_ioremap_resource(&pdev->dev, cfg_res);
279 if (!reg) 284 if (IS_ERR(reg))
280 return -ENODEV; 285 return PTR_ERR(reg);
281 286
282 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 287 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
283 if (!mem_res || !mem_res->start) 288 if (!mem_res || !mem_res->start)
@@ -301,6 +306,7 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
301 306
302 priv->irq = platform_get_irq(pdev, 0); 307 priv->irq = platform_get_irq(pdev, 0);
303 priv->reg = reg; 308 priv->reg = reg;
309 priv->dev = &pdev->dev;
304 310
305 return rcar_pci_add_controller(priv); 311 return rcar_pci_add_controller(priv);
306} 312}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0afbbbc55c81..b8ba2f794559 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -805,7 +805,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
805 afi_writel(pcie, value, AFI_PCIE_CONFIG); 805 afi_writel(pcie, value, AFI_PCIE_CONFIG);
806 806
807 value = afi_readl(pcie, AFI_FUSE); 807 value = afi_readl(pcie, AFI_FUSE);
808 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; 808 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
809 afi_writel(pcie, value, AFI_FUSE); 809 afi_writel(pcie, value, AFI_FUSE);
810 810
811 /* initialize internal PHY, enable up to 16 PCIE lanes */ 811 /* initialize internal PHY, enable up to 16 PCIE lanes */
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index e33b68be0391..1c92833a4ed3 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -209,6 +209,23 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
209 return 0; 209 return 0;
210} 210}
211 211
212static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
213 unsigned int nvec, unsigned int pos)
214{
215 unsigned int i, res, bit, val;
216
217 for (i = 0; i < nvec; i++) {
218 irq_set_msi_desc_off(irq_base, i, NULL);
219 clear_bit(pos + i, pp->msi_irq_in_use);
220 /* Disable corresponding interrupt on MSI interrupt controller */
221 res = ((pos + i) / 32) * 12;
222 bit = (pos + i) % 32;
223 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
224 val &= ~(1 << bit);
225 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
226 }
227}
228
212static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) 229static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
213{ 230{
214 int res, bit, irq, pos0, pos1, i; 231 int res, bit, irq, pos0, pos1, i;
@@ -242,18 +259,25 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
242 if (!irq) 259 if (!irq)
243 goto no_valid_irq; 260 goto no_valid_irq;
244 261
245 i = 0; 262 /*
246 while (i < no_irqs) { 263 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
264 * descs so there is no need to allocate descs here. We can therefore
265 * assume that if irq_find_mapping above returns non-zero, then the
266 * descs are also successfully allocated.
267 */
268
269 for (i = 0; i < no_irqs; i++) {
270 if (irq_set_msi_desc_off(irq, i, desc) != 0) {
271 clear_irq_range(pp, irq, i, pos0);
272 goto no_valid_irq;
273 }
247 set_bit(pos0 + i, pp->msi_irq_in_use); 274 set_bit(pos0 + i, pp->msi_irq_in_use);
248 irq_alloc_descs((irq + i), (irq + i), 1, 0);
249 irq_set_msi_desc(irq + i, desc);
250 /*Enable corresponding interrupt in MSI interrupt controller */ 275 /*Enable corresponding interrupt in MSI interrupt controller */
251 res = ((pos0 + i) / 32) * 12; 276 res = ((pos0 + i) / 32) * 12;
252 bit = (pos0 + i) % 32; 277 bit = (pos0 + i) % 32;
253 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); 278 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
254 val |= 1 << bit; 279 val |= 1 << bit;
255 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); 280 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
256 i++;
257 } 281 }
258 282
259 *pos = pos0; 283 *pos = pos0;
@@ -266,7 +290,7 @@ no_valid_irq:
266 290
267static void clear_irq(unsigned int irq) 291static void clear_irq(unsigned int irq)
268{ 292{
269 int res, bit, val, pos; 293 unsigned int pos, nvec;
270 struct irq_desc *desc; 294 struct irq_desc *desc;
271 struct msi_desc *msi; 295 struct msi_desc *msi;
272 struct pcie_port *pp; 296 struct pcie_port *pp;
@@ -281,18 +305,15 @@ static void clear_irq(unsigned int irq)
281 return; 305 return;
282 } 306 }
283 307
308 /* undo what was done in assign_irq */
284 pos = data->hwirq; 309 pos = data->hwirq;
310 nvec = 1 << msi->msi_attrib.multiple;
285 311
286 irq_free_desc(irq); 312 clear_irq_range(pp, irq, nvec, pos);
287 313
288 clear_bit(pos, pp->msi_irq_in_use); 314 /* all irqs cleared; reset attributes */
289 315 msi->irq = 0;
290 /* Disable corresponding interrupt on MSI interrupt controller */ 316 msi->msi_attrib.multiple = 0;
291 res = (pos / 32) * 12;
292 bit = pos % 32;
293 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
294 val &= ~(1 << bit);
295 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
296} 317}
297 318
298static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 319static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
@@ -320,10 +341,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
320 if (irq < 0) 341 if (irq < 0)
321 return irq; 342 return irq;
322 343
323 msg_ctr &= ~PCI_MSI_FLAGS_QSIZE; 344 /*
324 msg_ctr |= msgvec << 4; 345 * write_msi_msg() will update PCI_MSI_FLAGS so there is
325 pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, 346 * no need to explicitly call pci_write_config_word().
326 msg_ctr); 347 */
327 desc->msi_attrib.multiple = msgvec; 348 desc->msi_attrib.multiple = msgvec;
328 349
329 msg.address_lo = virt_to_phys((void *)pp->msi_data); 350 msg.address_lo = virt_to_phys((void *)pp->msi_data);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 21e865ded1dc..ffe6a6b336cf 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -140,15 +140,15 @@ struct controller *pcie_init(struct pcie_device *dev);
140int pcie_init_notification(struct controller *ctrl); 140int pcie_init_notification(struct controller *ctrl);
141int pciehp_enable_slot(struct slot *p_slot); 141int pciehp_enable_slot(struct slot *p_slot);
142int pciehp_disable_slot(struct slot *p_slot); 142int pciehp_disable_slot(struct slot *p_slot);
143int pcie_enable_notification(struct controller *ctrl); 143void pcie_enable_notification(struct controller *ctrl);
144int pciehp_power_on_slot(struct slot *slot); 144int pciehp_power_on_slot(struct slot *slot);
145int pciehp_power_off_slot(struct slot *slot); 145void pciehp_power_off_slot(struct slot *slot);
146int pciehp_get_power_status(struct slot *slot, u8 *status); 146void pciehp_get_power_status(struct slot *slot, u8 *status);
147int pciehp_get_attention_status(struct slot *slot, u8 *status); 147void pciehp_get_attention_status(struct slot *slot, u8 *status);
148 148
149int pciehp_set_attention_status(struct slot *slot, u8 status); 149void pciehp_set_attention_status(struct slot *slot, u8 status);
150int pciehp_get_latch_status(struct slot *slot, u8 *status); 150void pciehp_get_latch_status(struct slot *slot, u8 *status);
151int pciehp_get_adapter_status(struct slot *slot, u8 *status); 151void pciehp_get_adapter_status(struct slot *slot, u8 *status);
152int pciehp_query_power_fault(struct slot *slot); 152int pciehp_query_power_fault(struct slot *slot);
153void pciehp_green_led_on(struct slot *slot); 153void pciehp_green_led_on(struct slot *slot);
154void pciehp_green_led_off(struct slot *slot); 154void pciehp_green_led_off(struct slot *slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bbd48bbe4e9b..143a389d81fa 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -160,7 +160,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
160 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 160 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
161 __func__, slot_name(slot)); 161 __func__, slot_name(slot));
162 162
163 return pciehp_set_attention_status(slot, status); 163 pciehp_set_attention_status(slot, status);
164 return 0;
164} 165}
165 166
166 167
@@ -192,7 +193,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
192 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 193 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
193 __func__, slot_name(slot)); 194 __func__, slot_name(slot));
194 195
195 return pciehp_get_power_status(slot, value); 196 pciehp_get_power_status(slot, value);
197 return 0;
196} 198}
197 199
198static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) 200static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -202,7 +204,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
202 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 204 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
203 __func__, slot_name(slot)); 205 __func__, slot_name(slot));
204 206
205 return pciehp_get_attention_status(slot, value); 207 pciehp_get_attention_status(slot, value);
208 return 0;
206} 209}
207 210
208static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) 211static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -212,7 +215,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
212 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 215 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
213 __func__, slot_name(slot)); 216 __func__, slot_name(slot));
214 217
215 return pciehp_get_latch_status(slot, value); 218 pciehp_get_latch_status(slot, value);
219 return 0;
216} 220}
217 221
218static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) 222static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -222,7 +226,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
222 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 226 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
223 __func__, slot_name(slot)); 227 __func__, slot_name(slot));
224 228
225 return pciehp_get_adapter_status(slot, value); 229 pciehp_get_adapter_status(slot, value);
230 return 0;
226} 231}
227 232
228static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) 233static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 38f018679175..50628487597d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -158,11 +158,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
158{ 158{
159 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 159 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
160 if (POWER_CTRL(ctrl)) { 160 if (POWER_CTRL(ctrl)) {
161 if (pciehp_power_off_slot(pslot)) { 161 pciehp_power_off_slot(pslot);
162 ctrl_err(ctrl, 162
163 "Issue of Slot Power Off command failed\n");
164 return;
165 }
166 /* 163 /*
167 * After turning power off, we must wait for at least 1 second 164 * After turning power off, we must wait for at least 1 second
168 * before taking any action that relies on power having been 165 * before taking any action that relies on power having been
@@ -171,16 +168,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
171 msleep(1000); 168 msleep(1000);
172 } 169 }
173 170
174 if (PWR_LED(ctrl)) 171 pciehp_green_led_off(pslot);
175 pciehp_green_led_off(pslot); 172 pciehp_set_attention_status(pslot, 1);
176
177 if (ATTN_LED(ctrl)) {
178 if (pciehp_set_attention_status(pslot, 1)) {
179 ctrl_err(ctrl,
180 "Issue of Set Attention Led command failed\n");
181 return;
182 }
183 }
184} 173}
185 174
186/** 175/**
@@ -203,8 +192,7 @@ static int board_added(struct slot *p_slot)
203 return retval; 192 return retval;
204 } 193 }
205 194
206 if (PWR_LED(ctrl)) 195 pciehp_green_led_blink(p_slot);
207 pciehp_green_led_blink(p_slot);
208 196
209 /* Check link training status */ 197 /* Check link training status */
210 retval = pciehp_check_link_status(ctrl); 198 retval = pciehp_check_link_status(ctrl);
@@ -227,9 +215,7 @@ static int board_added(struct slot *p_slot)
227 goto err_exit; 215 goto err_exit;
228 } 216 }
229 217
230 if (PWR_LED(ctrl)) 218 pciehp_green_led_on(p_slot);
231 pciehp_green_led_on(p_slot);
232
233 return 0; 219 return 0;
234 220
235err_exit: 221err_exit:
@@ -243,7 +229,7 @@ err_exit:
243 */ 229 */
244static int remove_board(struct slot *p_slot) 230static int remove_board(struct slot *p_slot)
245{ 231{
246 int retval = 0; 232 int retval;
247 struct controller *ctrl = p_slot->ctrl; 233 struct controller *ctrl = p_slot->ctrl;
248 234
249 retval = pciehp_unconfigure_device(p_slot); 235 retval = pciehp_unconfigure_device(p_slot);
@@ -251,13 +237,8 @@ static int remove_board(struct slot *p_slot)
251 return retval; 237 return retval;
252 238
253 if (POWER_CTRL(ctrl)) { 239 if (POWER_CTRL(ctrl)) {
254 /* power off slot */ 240 pciehp_power_off_slot(p_slot);
255 retval = pciehp_power_off_slot(p_slot); 241
256 if (retval) {
257 ctrl_err(ctrl,
258 "Issue of Slot Disable command failed\n");
259 return retval;
260 }
261 /* 242 /*
262 * After turning power off, we must wait for at least 1 second 243 * After turning power off, we must wait for at least 1 second
263 * before taking any action that relies on power having been 244 * before taking any action that relies on power having been
@@ -267,9 +248,7 @@ static int remove_board(struct slot *p_slot)
267 } 248 }
268 249
269 /* turn off Green LED */ 250 /* turn off Green LED */
270 if (PWR_LED(ctrl)) 251 pciehp_green_led_off(p_slot);
271 pciehp_green_led_off(p_slot);
272
273 return 0; 252 return 0;
274} 253}
275 254
@@ -305,7 +284,7 @@ static void pciehp_power_thread(struct work_struct *work)
305 break; 284 break;
306 case POWERON_STATE: 285 case POWERON_STATE:
307 mutex_unlock(&p_slot->lock); 286 mutex_unlock(&p_slot->lock);
308 if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) 287 if (pciehp_enable_slot(p_slot))
309 pciehp_green_led_off(p_slot); 288 pciehp_green_led_off(p_slot);
310 mutex_lock(&p_slot->lock); 289 mutex_lock(&p_slot->lock);
311 p_slot->state = STATIC_STATE; 290 p_slot->state = STATIC_STATE;
@@ -372,11 +351,8 @@ static void handle_button_press_event(struct slot *p_slot)
372 "press.\n", slot_name(p_slot)); 351 "press.\n", slot_name(p_slot));
373 } 352 }
374 /* blink green LED and turn off amber */ 353 /* blink green LED and turn off amber */
375 if (PWR_LED(ctrl)) 354 pciehp_green_led_blink(p_slot);
376 pciehp_green_led_blink(p_slot); 355 pciehp_set_attention_status(p_slot, 0);
377 if (ATTN_LED(ctrl))
378 pciehp_set_attention_status(p_slot, 0);
379
380 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); 356 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
381 break; 357 break;
382 case BLINKINGOFF_STATE: 358 case BLINKINGOFF_STATE:
@@ -389,14 +365,11 @@ static void handle_button_press_event(struct slot *p_slot)
389 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); 365 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
390 cancel_delayed_work(&p_slot->work); 366 cancel_delayed_work(&p_slot->work);
391 if (p_slot->state == BLINKINGOFF_STATE) { 367 if (p_slot->state == BLINKINGOFF_STATE) {
392 if (PWR_LED(ctrl)) 368 pciehp_green_led_on(p_slot);
393 pciehp_green_led_on(p_slot);
394 } else { 369 } else {
395 if (PWR_LED(ctrl)) 370 pciehp_green_led_off(p_slot);
396 pciehp_green_led_off(p_slot);
397 } 371 }
398 if (ATTN_LED(ctrl)) 372 pciehp_set_attention_status(p_slot, 0);
399 pciehp_set_attention_status(p_slot, 0);
400 ctrl_info(ctrl, "PCI slot #%s - action canceled " 373 ctrl_info(ctrl, "PCI slot #%s - action canceled "
401 "due to button press\n", slot_name(p_slot)); 374 "due to button press\n", slot_name(p_slot));
402 p_slot->state = STATIC_STATE; 375 p_slot->state = STATIC_STATE;
@@ -456,10 +429,8 @@ static void interrupt_event_handler(struct work_struct *work)
456 case INT_POWER_FAULT: 429 case INT_POWER_FAULT:
457 if (!POWER_CTRL(ctrl)) 430 if (!POWER_CTRL(ctrl))
458 break; 431 break;
459 if (ATTN_LED(ctrl)) 432 pciehp_set_attention_status(p_slot, 1);
460 pciehp_set_attention_status(p_slot, 1); 433 pciehp_green_led_off(p_slot);
461 if (PWR_LED(ctrl))
462 pciehp_green_led_off(p_slot);
463 break; 434 break;
464 case INT_PRESENCE_ON: 435 case INT_PRESENCE_ON:
465 case INT_PRESENCE_OFF: 436 case INT_PRESENCE_OFF:
@@ -482,14 +453,14 @@ int pciehp_enable_slot(struct slot *p_slot)
482 int rc; 453 int rc;
483 struct controller *ctrl = p_slot->ctrl; 454 struct controller *ctrl = p_slot->ctrl;
484 455
485 rc = pciehp_get_adapter_status(p_slot, &getstatus); 456 pciehp_get_adapter_status(p_slot, &getstatus);
486 if (rc || !getstatus) { 457 if (!getstatus) {
487 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); 458 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
488 return -ENODEV; 459 return -ENODEV;
489 } 460 }
490 if (MRL_SENS(p_slot->ctrl)) { 461 if (MRL_SENS(p_slot->ctrl)) {
491 rc = pciehp_get_latch_status(p_slot, &getstatus); 462 pciehp_get_latch_status(p_slot, &getstatus);
492 if (rc || getstatus) { 463 if (getstatus) {
493 ctrl_info(ctrl, "Latch open on slot(%s)\n", 464 ctrl_info(ctrl, "Latch open on slot(%s)\n",
494 slot_name(p_slot)); 465 slot_name(p_slot));
495 return -ENODEV; 466 return -ENODEV;
@@ -497,8 +468,8 @@ int pciehp_enable_slot(struct slot *p_slot)
497 } 468 }
498 469
499 if (POWER_CTRL(p_slot->ctrl)) { 470 if (POWER_CTRL(p_slot->ctrl)) {
500 rc = pciehp_get_power_status(p_slot, &getstatus); 471 pciehp_get_power_status(p_slot, &getstatus);
501 if (rc || getstatus) { 472 if (getstatus) {
502 ctrl_info(ctrl, "Already enabled on slot(%s)\n", 473 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
503 slot_name(p_slot)); 474 slot_name(p_slot));
504 return -EINVAL; 475 return -EINVAL;
@@ -518,15 +489,14 @@ int pciehp_enable_slot(struct slot *p_slot)
518int pciehp_disable_slot(struct slot *p_slot) 489int pciehp_disable_slot(struct slot *p_slot)
519{ 490{
520 u8 getstatus = 0; 491 u8 getstatus = 0;
521 int ret = 0;
522 struct controller *ctrl = p_slot->ctrl; 492 struct controller *ctrl = p_slot->ctrl;
523 493
524 if (!p_slot->ctrl) 494 if (!p_slot->ctrl)
525 return 1; 495 return 1;
526 496
527 if (!HP_SUPR_RM(p_slot->ctrl)) { 497 if (!HP_SUPR_RM(p_slot->ctrl)) {
528 ret = pciehp_get_adapter_status(p_slot, &getstatus); 498 pciehp_get_adapter_status(p_slot, &getstatus);
529 if (ret || !getstatus) { 499 if (!getstatus) {
530 ctrl_info(ctrl, "No adapter on slot(%s)\n", 500 ctrl_info(ctrl, "No adapter on slot(%s)\n",
531 slot_name(p_slot)); 501 slot_name(p_slot));
532 return -ENODEV; 502 return -ENODEV;
@@ -534,8 +504,8 @@ int pciehp_disable_slot(struct slot *p_slot)
534 } 504 }
535 505
536 if (MRL_SENS(p_slot->ctrl)) { 506 if (MRL_SENS(p_slot->ctrl)) {
537 ret = pciehp_get_latch_status(p_slot, &getstatus); 507 pciehp_get_latch_status(p_slot, &getstatus);
538 if (ret || getstatus) { 508 if (getstatus) {
539 ctrl_info(ctrl, "Latch open on slot(%s)\n", 509 ctrl_info(ctrl, "Latch open on slot(%s)\n",
540 slot_name(p_slot)); 510 slot_name(p_slot));
541 return -ENODEV; 511 return -ENODEV;
@@ -543,8 +513,8 @@ int pciehp_disable_slot(struct slot *p_slot)
543 } 513 }
544 514
545 if (POWER_CTRL(p_slot->ctrl)) { 515 if (POWER_CTRL(p_slot->ctrl)) {
546 ret = pciehp_get_power_status(p_slot, &getstatus); 516 pciehp_get_power_status(p_slot, &getstatus);
547 if (ret || !getstatus) { 517 if (!getstatus) {
548 ctrl_info(ctrl, "Already disabled on slot(%s)\n", 518 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
549 slot_name(p_slot)); 519 slot_name(p_slot));
550 return -EINVAL; 520 return -EINVAL;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 3eea3fdd4b0b..14acfccb7670 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -41,34 +41,11 @@
41#include "../pci.h" 41#include "../pci.h"
42#include "pciehp.h" 42#include "pciehp.h"
43 43
44static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 44static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
45{ 45{
46 struct pci_dev *dev = ctrl->pcie->port; 46 return ctrl->pcie->port;
47 return pcie_capability_read_word(dev, reg, value);
48} 47}
49 48
50static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
51{
52 struct pci_dev *dev = ctrl->pcie->port;
53 return pcie_capability_read_dword(dev, reg, value);
54}
55
56static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
57{
58 struct pci_dev *dev = ctrl->pcie->port;
59 return pcie_capability_write_word(dev, reg, value);
60}
61
62static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
63{
64 struct pci_dev *dev = ctrl->pcie->port;
65 return pcie_capability_write_dword(dev, reg, value);
66}
67
68/* Power Control Command */
69#define POWER_ON 0
70#define POWER_OFF PCI_EXP_SLTCTL_PCC
71
72static irqreturn_t pcie_isr(int irq, void *dev_id); 49static irqreturn_t pcie_isr(int irq, void *dev_id);
73static void start_int_poll_timer(struct controller *ctrl, int sec); 50static void start_int_poll_timer(struct controller *ctrl, int sec);
74 51
@@ -129,20 +106,23 @@ static inline void pciehp_free_irq(struct controller *ctrl)
129 106
130static int pcie_poll_cmd(struct controller *ctrl) 107static int pcie_poll_cmd(struct controller *ctrl)
131{ 108{
109 struct pci_dev *pdev = ctrl_dev(ctrl);
132 u16 slot_status; 110 u16 slot_status;
133 int err, timeout = 1000; 111 int timeout = 1000;
134 112
135 err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 113 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
136 if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { 114 if (slot_status & PCI_EXP_SLTSTA_CC) {
137 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); 115 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
116 PCI_EXP_SLTSTA_CC);
138 return 1; 117 return 1;
139 } 118 }
140 while (timeout > 0) { 119 while (timeout > 0) {
141 msleep(10); 120 msleep(10);
142 timeout -= 10; 121 timeout -= 10;
143 err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 122 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
144 if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { 123 if (slot_status & PCI_EXP_SLTSTA_CC) {
145 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); 124 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
125 PCI_EXP_SLTSTA_CC);
146 return 1; 126 return 1;
147 } 127 }
148 } 128 }
@@ -169,21 +149,15 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
169 * @cmd: command value written to slot control register 149 * @cmd: command value written to slot control register
170 * @mask: bitmask of slot control register to be modified 150 * @mask: bitmask of slot control register to be modified
171 */ 151 */
172static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) 152static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
173{ 153{
174 int retval = 0; 154 struct pci_dev *pdev = ctrl_dev(ctrl);
175 u16 slot_status; 155 u16 slot_status;
176 u16 slot_ctrl; 156 u16 slot_ctrl;
177 157
178 mutex_lock(&ctrl->ctrl_lock); 158 mutex_lock(&ctrl->ctrl_lock);
179 159
180 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 160 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
181 if (retval) {
182 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
183 __func__);
184 goto out;
185 }
186
187 if (slot_status & PCI_EXP_SLTSTA_CC) { 161 if (slot_status & PCI_EXP_SLTSTA_CC) {
188 if (!ctrl->no_cmd_complete) { 162 if (!ctrl->no_cmd_complete) {
189 /* 163 /*
@@ -207,24 +181,17 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
207 } 181 }
208 } 182 }
209 183
210 retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 184 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
211 if (retval) {
212 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
213 goto out;
214 }
215
216 slot_ctrl &= ~mask; 185 slot_ctrl &= ~mask;
217 slot_ctrl |= (cmd & mask); 186 slot_ctrl |= (cmd & mask);
218 ctrl->cmd_busy = 1; 187 ctrl->cmd_busy = 1;
219 smp_mb(); 188 smp_mb();
220 retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl); 189 pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
221 if (retval)
222 ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
223 190
224 /* 191 /*
225 * Wait for command completion. 192 * Wait for command completion.
226 */ 193 */
227 if (!retval && !ctrl->no_cmd_complete) { 194 if (!ctrl->no_cmd_complete) {
228 int poll = 0; 195 int poll = 0;
229 /* 196 /*
230 * if hotplug interrupt is not enabled or command 197 * if hotplug interrupt is not enabled or command
@@ -236,19 +203,16 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
236 poll = 1; 203 poll = 1;
237 pcie_wait_cmd(ctrl, poll); 204 pcie_wait_cmd(ctrl, poll);
238 } 205 }
239 out:
240 mutex_unlock(&ctrl->ctrl_lock); 206 mutex_unlock(&ctrl->ctrl_lock);
241 return retval;
242} 207}
243 208
244static bool check_link_active(struct controller *ctrl) 209static bool check_link_active(struct controller *ctrl)
245{ 210{
246 bool ret = false; 211 struct pci_dev *pdev = ctrl_dev(ctrl);
247 u16 lnk_status; 212 u16 lnk_status;
213 bool ret;
248 214
249 if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status)) 215 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
250 return ret;
251
252 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); 216 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
253 217
254 if (ret) 218 if (ret)
@@ -311,9 +275,9 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
311 275
312int pciehp_check_link_status(struct controller *ctrl) 276int pciehp_check_link_status(struct controller *ctrl)
313{ 277{
278 struct pci_dev *pdev = ctrl_dev(ctrl);
279 bool found;
314 u16 lnk_status; 280 u16 lnk_status;
315 int retval = 0;
316 bool found = false;
317 281
318 /* 282 /*
319 * Data Link Layer Link Active Reporting must be capable for 283 * Data Link Layer Link Active Reporting must be capable for
@@ -330,52 +294,37 @@ int pciehp_check_link_status(struct controller *ctrl)
330 found = pci_bus_check_dev(ctrl->pcie->port->subordinate, 294 found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
331 PCI_DEVFN(0, 0)); 295 PCI_DEVFN(0, 0));
332 296
333 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 297 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
334 if (retval) {
335 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
336 return retval;
337 }
338
339 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); 298 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
340 if ((lnk_status & PCI_EXP_LNKSTA_LT) || 299 if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
341 !(lnk_status & PCI_EXP_LNKSTA_NLW)) { 300 !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
342 ctrl_err(ctrl, "Link Training Error occurs \n"); 301 ctrl_err(ctrl, "Link Training Error occurs \n");
343 retval = -1; 302 return -1;
344 return retval;
345 } 303 }
346 304
347 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); 305 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
348 306
349 if (!found && !retval) 307 if (!found)
350 retval = -1; 308 return -1;
351 309
352 return retval; 310 return 0;
353} 311}
354 312
355static int __pciehp_link_set(struct controller *ctrl, bool enable) 313static int __pciehp_link_set(struct controller *ctrl, bool enable)
356{ 314{
315 struct pci_dev *pdev = ctrl_dev(ctrl);
357 u16 lnk_ctrl; 316 u16 lnk_ctrl;
358 int retval = 0;
359 317
360 retval = pciehp_readw(ctrl, PCI_EXP_LNKCTL, &lnk_ctrl); 318 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
361 if (retval) {
362 ctrl_err(ctrl, "Cannot read LNKCTRL register\n");
363 return retval;
364 }
365 319
366 if (enable) 320 if (enable)
367 lnk_ctrl &= ~PCI_EXP_LNKCTL_LD; 321 lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
368 else 322 else
369 lnk_ctrl |= PCI_EXP_LNKCTL_LD; 323 lnk_ctrl |= PCI_EXP_LNKCTL_LD;
370 324
371 retval = pciehp_writew(ctrl, PCI_EXP_LNKCTL, lnk_ctrl); 325 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
372 if (retval) {
373 ctrl_err(ctrl, "Cannot write LNKCTRL register\n");
374 return retval;
375 }
376 ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl); 326 ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
377 327 return 0;
378 return retval;
379} 328}
380 329
381static int pciehp_link_enable(struct controller *ctrl) 330static int pciehp_link_enable(struct controller *ctrl)
@@ -388,223 +337,165 @@ static int pciehp_link_disable(struct controller *ctrl)
388 return __pciehp_link_set(ctrl, false); 337 return __pciehp_link_set(ctrl, false);
389} 338}
390 339
391int pciehp_get_attention_status(struct slot *slot, u8 *status) 340void pciehp_get_attention_status(struct slot *slot, u8 *status)
392{ 341{
393 struct controller *ctrl = slot->ctrl; 342 struct controller *ctrl = slot->ctrl;
343 struct pci_dev *pdev = ctrl_dev(ctrl);
394 u16 slot_ctrl; 344 u16 slot_ctrl;
395 u8 atten_led_state;
396 int retval = 0;
397
398 retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
399 if (retval) {
400 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
401 return retval;
402 }
403 345
346 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
404 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__, 347 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
405 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); 348 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
406 349
407 atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; 350 switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
408 351 case PCI_EXP_SLTCTL_ATTN_IND_ON:
409 switch (atten_led_state) {
410 case 0:
411 *status = 0xFF; /* Reserved */
412 break;
413 case 1:
414 *status = 1; /* On */ 352 *status = 1; /* On */
415 break; 353 break;
416 case 2: 354 case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
417 *status = 2; /* Blink */ 355 *status = 2; /* Blink */
418 break; 356 break;
419 case 3: 357 case PCI_EXP_SLTCTL_ATTN_IND_OFF:
420 *status = 0; /* Off */ 358 *status = 0; /* Off */
421 break; 359 break;
422 default: 360 default:
423 *status = 0xFF; 361 *status = 0xFF;
424 break; 362 break;
425 } 363 }
426
427 return 0;
428} 364}
429 365
430int pciehp_get_power_status(struct slot *slot, u8 *status) 366void pciehp_get_power_status(struct slot *slot, u8 *status)
431{ 367{
432 struct controller *ctrl = slot->ctrl; 368 struct controller *ctrl = slot->ctrl;
369 struct pci_dev *pdev = ctrl_dev(ctrl);
433 u16 slot_ctrl; 370 u16 slot_ctrl;
434 u8 pwr_state;
435 int retval = 0;
436 371
437 retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 372 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
438 if (retval) {
439 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
440 return retval;
441 }
442 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__, 373 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
443 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); 374 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
444 375
445 pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; 376 switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
446 377 case PCI_EXP_SLTCTL_PWR_ON:
447 switch (pwr_state) { 378 *status = 1; /* On */
448 case 0:
449 *status = 1;
450 break; 379 break;
451 case 1: 380 case PCI_EXP_SLTCTL_PWR_OFF:
452 *status = 0; 381 *status = 0; /* Off */
453 break; 382 break;
454 default: 383 default:
455 *status = 0xFF; 384 *status = 0xFF;
456 break; 385 break;
457 } 386 }
458
459 return retval;
460} 387}
461 388
462int pciehp_get_latch_status(struct slot *slot, u8 *status) 389void pciehp_get_latch_status(struct slot *slot, u8 *status)
463{ 390{
464 struct controller *ctrl = slot->ctrl; 391 struct pci_dev *pdev = ctrl_dev(slot->ctrl);
465 u16 slot_status; 392 u16 slot_status;
466 int retval;
467 393
468 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 394 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
469 if (retval) {
470 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
471 __func__);
472 return retval;
473 }
474 *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); 395 *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
475 return 0;
476} 396}
477 397
478int pciehp_get_adapter_status(struct slot *slot, u8 *status) 398void pciehp_get_adapter_status(struct slot *slot, u8 *status)
479{ 399{
480 struct controller *ctrl = slot->ctrl; 400 struct pci_dev *pdev = ctrl_dev(slot->ctrl);
481 u16 slot_status; 401 u16 slot_status;
482 int retval;
483 402
484 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 403 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
485 if (retval) {
486 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
487 __func__);
488 return retval;
489 }
490 *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); 404 *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
491 return 0;
492} 405}
493 406
494int pciehp_query_power_fault(struct slot *slot) 407int pciehp_query_power_fault(struct slot *slot)
495{ 408{
496 struct controller *ctrl = slot->ctrl; 409 struct pci_dev *pdev = ctrl_dev(slot->ctrl);
497 u16 slot_status; 410 u16 slot_status;
498 int retval;
499 411
500 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 412 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
501 if (retval) {
502 ctrl_err(ctrl, "Cannot check for power fault\n");
503 return retval;
504 }
505 return !!(slot_status & PCI_EXP_SLTSTA_PFD); 413 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
506} 414}
507 415
508int pciehp_set_attention_status(struct slot *slot, u8 value) 416void pciehp_set_attention_status(struct slot *slot, u8 value)
509{ 417{
510 struct controller *ctrl = slot->ctrl; 418 struct controller *ctrl = slot->ctrl;
511 u16 slot_cmd; 419 u16 slot_cmd;
512 u16 cmd_mask;
513 420
514 cmd_mask = PCI_EXP_SLTCTL_AIC; 421 if (!ATTN_LED(ctrl))
422 return;
423
515 switch (value) { 424 switch (value) {
516 case 0 : /* turn off */ 425 case 0 : /* turn off */
517 slot_cmd = 0x00C0; 426 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
518 break; 427 break;
519 case 1: /* turn on */ 428 case 1: /* turn on */
520 slot_cmd = 0x0040; 429 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
521 break; 430 break;
522 case 2: /* turn blink */ 431 case 2: /* turn blink */
523 slot_cmd = 0x0080; 432 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
524 break; 433 break;
525 default: 434 default:
526 return -EINVAL; 435 return;
527 } 436 }
528 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 437 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
529 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 438 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
530 return pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 439 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
531} 440}
532 441
533void pciehp_green_led_on(struct slot *slot) 442void pciehp_green_led_on(struct slot *slot)
534{ 443{
535 struct controller *ctrl = slot->ctrl; 444 struct controller *ctrl = slot->ctrl;
536 u16 slot_cmd;
537 u16 cmd_mask;
538 445
539 slot_cmd = 0x0100; 446 if (!PWR_LED(ctrl))
540 cmd_mask = PCI_EXP_SLTCTL_PIC; 447 return;
541 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 448
449 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);
542 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 450 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
543 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 451 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
452 PCI_EXP_SLTCTL_PWR_IND_ON);
544} 453}
545 454
546void pciehp_green_led_off(struct slot *slot) 455void pciehp_green_led_off(struct slot *slot)
547{ 456{
548 struct controller *ctrl = slot->ctrl; 457 struct controller *ctrl = slot->ctrl;
549 u16 slot_cmd;
550 u16 cmd_mask;
551 458
552 slot_cmd = 0x0300; 459 if (!PWR_LED(ctrl))
553 cmd_mask = PCI_EXP_SLTCTL_PIC; 460 return;
554 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 461
462 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC);
555 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 463 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
556 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 464 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
465 PCI_EXP_SLTCTL_PWR_IND_OFF);
557} 466}
558 467
559void pciehp_green_led_blink(struct slot *slot) 468void pciehp_green_led_blink(struct slot *slot)
560{ 469{
561 struct controller *ctrl = slot->ctrl; 470 struct controller *ctrl = slot->ctrl;
562 u16 slot_cmd;
563 u16 cmd_mask;
564 471
565 slot_cmd = 0x0200; 472 if (!PWR_LED(ctrl))
566 cmd_mask = PCI_EXP_SLTCTL_PIC; 473 return;
567 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 474
475 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);
568 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 476 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
569 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 477 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
478 PCI_EXP_SLTCTL_PWR_IND_BLINK);
570} 479}
571 480
572int pciehp_power_on_slot(struct slot * slot) 481int pciehp_power_on_slot(struct slot * slot)
573{ 482{
574 struct controller *ctrl = slot->ctrl; 483 struct controller *ctrl = slot->ctrl;
575 u16 slot_cmd; 484 struct pci_dev *pdev = ctrl_dev(ctrl);
576 u16 cmd_mask;
577 u16 slot_status; 485 u16 slot_status;
578 int retval = 0; 486 int retval;
579 487
580 /* Clear sticky power-fault bit from previous power failures */ 488 /* Clear sticky power-fault bit from previous power failures */
581 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 489 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
582 if (retval) { 490 if (slot_status & PCI_EXP_SLTSTA_PFD)
583 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", 491 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
584 __func__); 492 PCI_EXP_SLTSTA_PFD);
585 return retval;
586 }
587 slot_status &= PCI_EXP_SLTSTA_PFD;
588 if (slot_status) {
589 retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
590 if (retval) {
591 ctrl_err(ctrl,
592 "%s: Cannot write to SLOTSTATUS register\n",
593 __func__);
594 return retval;
595 }
596 }
597 ctrl->power_fault_detected = 0; 493 ctrl->power_fault_detected = 0;
598 494
599 slot_cmd = POWER_ON; 495 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
600 cmd_mask = PCI_EXP_SLTCTL_PCC;
601 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
602 if (retval) {
603 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
604 return retval;
605 }
606 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 496 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
607 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 497 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
498 PCI_EXP_SLTCTL_PWR_ON);
608 499
609 retval = pciehp_link_enable(ctrl); 500 retval = pciehp_link_enable(ctrl);
610 if (retval) 501 if (retval)
@@ -613,12 +504,9 @@ int pciehp_power_on_slot(struct slot * slot)
613 return retval; 504 return retval;
614} 505}
615 506
616int pciehp_power_off_slot(struct slot * slot) 507void pciehp_power_off_slot(struct slot * slot)
617{ 508{
618 struct controller *ctrl = slot->ctrl; 509 struct controller *ctrl = slot->ctrl;
619 u16 slot_cmd;
620 u16 cmd_mask;
621 int retval;
622 510
623 /* Disable the link at first */ 511 /* Disable the link at first */
624 pciehp_link_disable(ctrl); 512 pciehp_link_disable(ctrl);
@@ -628,21 +516,16 @@ int pciehp_power_off_slot(struct slot * slot)
628 else 516 else
629 msleep(1000); 517 msleep(1000);
630 518
631 slot_cmd = POWER_OFF; 519 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
632 cmd_mask = PCI_EXP_SLTCTL_PCC;
633 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
634 if (retval) {
635 ctrl_err(ctrl, "Write command failed!\n");
636 return retval;
637 }
638 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 520 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
639 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 521 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
640 return 0; 522 PCI_EXP_SLTCTL_PWR_OFF);
641} 523}
642 524
643static irqreturn_t pcie_isr(int irq, void *dev_id) 525static irqreturn_t pcie_isr(int irq, void *dev_id)
644{ 526{
645 struct controller *ctrl = (struct controller *)dev_id; 527 struct controller *ctrl = (struct controller *)dev_id;
528 struct pci_dev *pdev = ctrl_dev(ctrl);
646 struct slot *slot = ctrl->slot; 529 struct slot *slot = ctrl->slot;
647 u16 detected, intr_loc; 530 u16 detected, intr_loc;
648 531
@@ -653,11 +536,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
653 */ 536 */
654 intr_loc = 0; 537 intr_loc = 0;
655 do { 538 do {
656 if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) { 539 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
657 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
658 __func__);
659 return IRQ_NONE;
660 }
661 540
662 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | 541 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
663 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 542 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
@@ -666,11 +545,9 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
666 intr_loc |= detected; 545 intr_loc |= detected;
667 if (!intr_loc) 546 if (!intr_loc)
668 return IRQ_NONE; 547 return IRQ_NONE;
669 if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) { 548 if (detected)
670 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", 549 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
671 __func__); 550 intr_loc);
672 return IRQ_NONE;
673 }
674 } while (detected); 551 } while (detected);
675 552
676 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); 553 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
@@ -705,7 +582,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
705 return IRQ_HANDLED; 582 return IRQ_HANDLED;
706} 583}
707 584
708int pcie_enable_notification(struct controller *ctrl) 585void pcie_enable_notification(struct controller *ctrl)
709{ 586{
710 u16 cmd, mask; 587 u16 cmd, mask;
711 588
@@ -731,22 +608,18 @@ int pcie_enable_notification(struct controller *ctrl)
731 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 608 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
732 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); 609 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
733 610
734 if (pcie_write_cmd(ctrl, cmd, mask)) { 611 pcie_write_cmd(ctrl, cmd, mask);
735 ctrl_err(ctrl, "Cannot enable software notification\n");
736 return -1;
737 }
738 return 0;
739} 612}
740 613
741static void pcie_disable_notification(struct controller *ctrl) 614static void pcie_disable_notification(struct controller *ctrl)
742{ 615{
743 u16 mask; 616 u16 mask;
617
744 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | 618 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
745 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 619 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
746 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | 620 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
747 PCI_EXP_SLTCTL_DLLSCE); 621 PCI_EXP_SLTCTL_DLLSCE);
748 if (pcie_write_cmd(ctrl, 0, mask)) 622 pcie_write_cmd(ctrl, 0, mask);
749 ctrl_warn(ctrl, "Cannot disable software notification\n");
750} 623}
751 624
752/* 625/*
@@ -758,6 +631,7 @@ static void pcie_disable_notification(struct controller *ctrl)
758int pciehp_reset_slot(struct slot *slot, int probe) 631int pciehp_reset_slot(struct slot *slot, int probe)
759{ 632{
760 struct controller *ctrl = slot->ctrl; 633 struct controller *ctrl = slot->ctrl;
634 struct pci_dev *pdev = ctrl_dev(ctrl);
761 635
762 if (probe) 636 if (probe)
763 return 0; 637 return 0;
@@ -771,7 +645,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
771 pci_reset_bridge_secondary_bus(ctrl->pcie->port); 645 pci_reset_bridge_secondary_bus(ctrl->pcie->port);
772 646
773 if (HP_SUPR_RM(ctrl)) { 647 if (HP_SUPR_RM(ctrl)) {
774 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC); 648 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
649 PCI_EXP_SLTSTA_PDC);
775 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE); 650 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
776 if (pciehp_poll_mode) 651 if (pciehp_poll_mode)
777 int_poll_timeout(ctrl->poll_timer.data); 652 int_poll_timeout(ctrl->poll_timer.data);
@@ -784,10 +659,7 @@ int pcie_init_notification(struct controller *ctrl)
784{ 659{
785 if (pciehp_request_irq(ctrl)) 660 if (pciehp_request_irq(ctrl))
786 return -1; 661 return -1;
787 if (pcie_enable_notification(ctrl)) { 662 pcie_enable_notification(ctrl);
788 pciehp_free_irq(ctrl);
789 return -1;
790 }
791 ctrl->notification_enabled = 1; 663 ctrl->notification_enabled = 1;
792 return 0; 664 return 0;
793} 665}
@@ -875,12 +747,14 @@ static inline void dbg_ctrl(struct controller *ctrl)
875 EMI(ctrl) ? "yes" : "no"); 747 EMI(ctrl) ? "yes" : "no");
876 ctrl_info(ctrl, " Command Completed : %3s\n", 748 ctrl_info(ctrl, " Command Completed : %3s\n",
877 NO_CMD_CMPL(ctrl) ? "no" : "yes"); 749 NO_CMD_CMPL(ctrl) ? "no" : "yes");
878 pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16); 750 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
879 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); 751 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
880 pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16); 752 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
881 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); 753 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
882} 754}
883 755
756#define FLAG(x,y) (((x) & (y)) ? '+' : '-')
757
884struct controller *pcie_init(struct pcie_device *dev) 758struct controller *pcie_init(struct pcie_device *dev)
885{ 759{
886 struct controller *ctrl; 760 struct controller *ctrl;
@@ -893,11 +767,7 @@ struct controller *pcie_init(struct pcie_device *dev)
893 goto abort; 767 goto abort;
894 } 768 }
895 ctrl->pcie = dev; 769 ctrl->pcie = dev;
896 if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { 770 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
897 ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
898 goto abort_ctrl;
899 }
900
901 ctrl->slot_cap = slot_cap; 771 ctrl->slot_cap = slot_cap;
902 mutex_init(&ctrl->ctrl_lock); 772 mutex_init(&ctrl->ctrl_lock);
903 init_waitqueue_head(&ctrl->queue); 773 init_waitqueue_head(&ctrl->queue);
@@ -913,25 +783,31 @@ struct controller *pcie_init(struct pcie_device *dev)
913 ctrl->no_cmd_complete = 1; 783 ctrl->no_cmd_complete = 1;
914 784
915 /* Check if Data Link Layer Link Active Reporting is implemented */ 785 /* Check if Data Link Layer Link Active Reporting is implemented */
916 if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) { 786 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
917 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
918 goto abort_ctrl;
919 }
920 if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { 787 if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
921 ctrl_dbg(ctrl, "Link Active Reporting supported\n"); 788 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
922 ctrl->link_active_reporting = 1; 789 ctrl->link_active_reporting = 1;
923 } 790 }
924 791
925 /* Clear all remaining event bits in Slot Status register */ 792 /* Clear all remaining event bits in Slot Status register */
926 if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) 793 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
927 goto abort_ctrl; 794 PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
795 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
796 PCI_EXP_SLTSTA_CC);
928 797
929 /* Disable software notification */ 798 /* Disable software notification */
930 pcie_disable_notification(ctrl); 799 pcie_disable_notification(ctrl);
931 800
932 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 801 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
933 pdev->vendor, pdev->device, pdev->subsystem_vendor, 802 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
934 pdev->subsystem_device); 803 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
804 FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
805 FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
806 FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
807 FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
808 FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
809 FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
810 FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
935 811
936 if (pcie_init_slot(ctrl)) 812 if (pcie_init_slot(ctrl))
937 goto abort_ctrl; 813 goto abort_ctrl;
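
The pciehp_hpc.c hunks above drop the driver's private pciehp_readw/readl/writew/writel wrappers in favour of calling the generic pcie_capability_*() accessors directly on the port device, reached through the new ctrl_dev() helper, and the slot-register getters stop returning error codes. A minimal sketch of the resulting access pattern, assuming a simplified stand-in for struct controller (the real one lives in pciehp.h); only the pcie_capability accessors and the PCI_EXP_SLTSTA bits are kernel API, the rest is illustrative:

#include <linux/pci.h>

/* Simplified stand-in for the pciehp controller; illustrative only. */
struct example_ctrl {
	struct pci_dev *port;	/* PCIe port that owns the hotplug slot */
};

static u8 example_adapter_present(struct example_ctrl *ctrl)
{
	u16 slot_status;

	/*
	 * pcie_capability_read_word() resolves the PCI Express capability
	 * offset internally, so callers only name the register (SLTSTA).
	 */
	pcie_capability_read_word(ctrl->port, PCI_EXP_SLTSTA, &slot_status);
	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
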
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 0e0d0f7f63fd..198355112ee7 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -78,7 +78,7 @@ int pciehp_configure_device(struct slot *p_slot)
78 78
79int pciehp_unconfigure_device(struct slot *p_slot) 79int pciehp_unconfigure_device(struct slot *p_slot)
80{ 80{
81 int ret, rc = 0; 81 int rc = 0;
82 u8 bctl = 0; 82 u8 bctl = 0;
83 u8 presence = 0; 83 u8 presence = 0;
84 struct pci_dev *dev, *temp; 84 struct pci_dev *dev, *temp;
@@ -88,9 +88,7 @@ int pciehp_unconfigure_device(struct slot *p_slot)
88 88
89 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", 89 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
90 __func__, pci_domain_nr(parent), parent->number); 90 __func__, pci_domain_nr(parent), parent->number);
91 ret = pciehp_get_adapter_status(p_slot, &presence); 91 pciehp_get_adapter_status(p_slot, &presence);
92 if (ret)
93 presence = 0;
94 92
95 /* 93 /*
96 * Stopping an SR-IOV PF device removes all the associated VFs, 94 * Stopping an SR-IOV PF device removes all the associated VFs,
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1fe2d6fb19d5..68311ec849ee 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -441,6 +441,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
441 441
442found: 442found:
443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl); 443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
444 pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
444 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); 445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); 446 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
446 if (!offset || (total > 1 && !stride)) 447 if (!offset || (total > 1 && !stride))
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3fcd67a16677..51bf0400a889 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -116,7 +116,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
116 return default_teardown_msi_irqs(dev); 116 return default_teardown_msi_irqs(dev);
117} 117}
118 118
119void default_restore_msi_irqs(struct pci_dev *dev, int irq) 119static void default_restore_msi_irq(struct pci_dev *dev, int irq)
120{ 120{
121 struct msi_desc *entry; 121 struct msi_desc *entry;
122 122
@@ -134,9 +134,9 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
134 write_msi_msg(irq, &entry->msg); 134 write_msi_msg(irq, &entry->msg);
135} 135}
136 136
137void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq) 137void __weak arch_restore_msi_irqs(struct pci_dev *dev)
138{ 138{
139 return default_restore_msi_irqs(dev, irq); 139 return default_restore_msi_irqs(dev);
140} 140}
141 141
142static void msi_set_enable(struct pci_dev *dev, int enable) 142static void msi_set_enable(struct pci_dev *dev, int enable)
@@ -262,6 +262,15 @@ void unmask_msi_irq(struct irq_data *data)
262 msi_set_mask_bit(data, 0); 262 msi_set_mask_bit(data, 0);
263} 263}
264 264
265void default_restore_msi_irqs(struct pci_dev *dev)
266{
267 struct msi_desc *entry;
268
269 list_for_each_entry(entry, &dev->msi_list, list) {
270 default_restore_msi_irq(dev, entry->irq);
271 }
272}
273
265void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 274void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
266{ 275{
267 BUG_ON(entry->dev->current_state != PCI_D0); 276 BUG_ON(entry->dev->current_state != PCI_D0);
@@ -430,7 +439,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
430 439
431 pci_intx_for_msi(dev, 0); 440 pci_intx_for_msi(dev, 0);
432 msi_set_enable(dev, 0); 441 msi_set_enable(dev, 0);
433 arch_restore_msi_irqs(dev, dev->irq); 442 arch_restore_msi_irqs(dev);
434 443
435 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 444 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
436 msi_mask_irq(entry, msi_capable_mask(control), entry->masked); 445 msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -455,8 +464,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
455 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL; 464 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
456 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); 465 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
457 466
467 arch_restore_msi_irqs(dev);
458 list_for_each_entry(entry, &dev->msi_list, list) { 468 list_for_each_entry(entry, &dev->msi_list, list) {
459 arch_restore_msi_irqs(dev, entry->irq);
460 msix_mask_irq(entry, entry->masked); 469 msix_mask_irq(entry, entry->masked);
461 } 470 }
462 471
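
The msi.c hunks above turn arch_restore_msi_irqs() from a per-vector hook taking (dev, irq) into a per-device hook, and default_restore_msi_irqs() now walks the device's msi_list itself. A hedged sketch of what an architecture override looks like after this change; arch_restore_msi_irqs(), write_msi_msg() and the msi_list/irq/msg fields are the kernel interfaces used in the patch, the loop body is illustrative:

#include <linux/msi.h>
#include <linux/pci.h>

void arch_restore_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	/* Called once per device now; restore every vector ourselves. */
	list_for_each_entry(entry, &dev->msi_list, list)
		write_msi_msg(entry->irq, &entry->msg);
}
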
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..e0431f1af33b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -358,7 +358,7 @@ static void pci_acpi_cleanup(struct device *dev)
358 358
359static bool pci_acpi_bus_match(struct device *dev) 359static bool pci_acpi_bus_match(struct device *dev)
360{ 360{
361 return dev->bus == &pci_bus_type; 361 return dev_is_pci(dev);
362} 362}
363 363
364static struct acpi_bus_type acpi_pci_bus = { 364static struct acpi_bus_type acpi_pci_bus = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9042fdbd7244..25f0bc659164 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/kexec.h>
22#include "pci.h" 23#include "pci.h"
23 24
24struct pci_dynid { 25struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
288 int error, node; 289 int error, node;
289 struct drv_dev_and_id ddi = { drv, dev, id }; 290 struct drv_dev_and_id ddi = { drv, dev, id };
290 291
291 /* Execute driver initialization on node where the device's 292 /*
292 bus is attached to. This way the driver likely allocates 293 * Execute driver initialization on node where the device is
293 its local memory on the right node without any need to 294 * attached. This way the driver likely allocates its local memory
294 change it. */ 295 * on the right node.
296 */
295 node = dev_to_node(&dev->dev); 297 node = dev_to_node(&dev->dev);
296 if (node >= 0) { 298
299 /*
300 * On NUMA systems, we are likely to call a PF probe function using
301 * work_on_cpu(). If that probe calls pci_enable_sriov() (which
302 * adds the VF devices via pci_bus_add_device()), we may re-enter
303 * this function to call the VF probe function. Calling
304 * work_on_cpu() again will cause a lockdep warning. Since VFs are
305 * always on the same node as the PF, we can work around this by
306 * avoiding work_on_cpu() when we're already on the correct node.
307 *
308 * Preemption is enabled, so it's theoretically unsafe to use
309 * numa_node_id(), but even if we run the probe function on the
310 * wrong node, it should be functionally correct.
311 */
312 if (node >= 0 && node != numa_node_id()) {
297 int cpu; 313 int cpu;
298 314
299 get_online_cpus(); 315 get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
305 put_online_cpus(); 321 put_online_cpus();
306 } else 322 } else
307 error = local_pci_probe(&ddi); 323 error = local_pci_probe(&ddi);
324
308 return error; 325 return error;
309} 326}
310 327
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
399 pci_msi_shutdown(pci_dev); 416 pci_msi_shutdown(pci_dev);
400 pci_msix_shutdown(pci_dev); 417 pci_msix_shutdown(pci_dev);
401 418
419#ifdef CONFIG_KEXEC
402 /* 420 /*
403 * Turn off Bus Master bit on the device to tell it to not 421 * If this is a kexec reboot, turn off Bus Master bit on the
404 * continue to do DMA. Don't touch devices in D3cold or unknown states. 422 * device to tell it to not continue to do DMA. Don't touch
423 * devices in D3cold or unknown states.
424 * If it is not a kexec reboot, firmware will hit the PCI
425 * devices with big hammer and stop their DMA any way.
405 */ 426 */
406 if (pci_dev->current_state <= PCI_D3hot) 427 if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
407 pci_clear_master(pci_dev); 428 pci_clear_master(pci_dev);
429#endif
408} 430}
409 431
410#ifdef CONFIG_PM 432#ifdef CONFIG_PM
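
In pci-driver.c above, pci_call_probe() now skips work_on_cpu() whenever it is already running on the device's home node, which avoids the nested work_on_cpu() (and the lockdep warning) that occurs when a PF probe enables SR-IOV and the VF probes re-enter this path, and pci_device_shutdown() clears bus mastering only during a kexec reboot. A rough sketch of the cross-node probe pattern kept for the remaining case; probe_on_node() and local_probe() are placeholders, while numa_node_id(), cpumask_any_and(), cpumask_of_node(), work_on_cpu() and the CPU hotplug lock helpers are the usual kernel APIs:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

static long local_probe(void *arg)
{
	/* Stand-in for the actual probe body; runs on the chosen CPU. */
	return 0;
}

static int probe_on_node(int node, void *arg)
{
	int error;

	if (node >= 0 && node != numa_node_id()) {
		int cpu;

		get_online_cpus();
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
		if (cpu < nr_cpu_ids)
			error = work_on_cpu(cpu, local_probe, arg);
		else
			error = local_probe(arg);
		put_online_cpus();
	} else {
		/* Already on the right node: no work item, no nested lock. */
		error = local_probe(arg);
	}
	return error;
}
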
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 33120d156668..508e560b7d2a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -431,6 +431,32 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
431} 431}
432 432
433/** 433/**
434 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
435 * @dev: the PCI device to operate on
436 * @pos: config space offset of status word
437 * @mask: mask of bit(s) to care about in status word
438 *
439 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
440 */
441int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
442{
443 int i;
444
445 /* Wait for Transaction Pending bit clean */
446 for (i = 0; i < 4; i++) {
447 u16 status;
448 if (i)
449 msleep((1 << (i - 1)) * 100);
450
451 pci_read_config_word(dev, pos, &status);
452 if (!(status & mask))
453 return 1;
454 }
455
456 return 0;
457}
458
459/**
434 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) 460 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
435 * @dev: PCI device to have its BARs restored 461 * @dev: PCI device to have its BARs restored
436 * 462 *
@@ -835,18 +861,28 @@ EXPORT_SYMBOL(pci_choose_state);
835#define PCI_EXP_SAVE_REGS 7 861#define PCI_EXP_SAVE_REGS 7
836 862
837 863
838static struct pci_cap_saved_state *pci_find_saved_cap( 864static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
839 struct pci_dev *pci_dev, char cap) 865 u16 cap, bool extended)
840{ 866{
841 struct pci_cap_saved_state *tmp; 867 struct pci_cap_saved_state *tmp;
842 868
843 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { 869 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
844 if (tmp->cap.cap_nr == cap) 870 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
845 return tmp; 871 return tmp;
846 } 872 }
847 return NULL; 873 return NULL;
848} 874}
849 875
876struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
877{
878 return _pci_find_saved_cap(dev, cap, false);
879}
880
881struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
882{
883 return _pci_find_saved_cap(dev, cap, true);
884}
885
850static int pci_save_pcie_state(struct pci_dev *dev) 886static int pci_save_pcie_state(struct pci_dev *dev)
851{ 887{
852 int i = 0; 888 int i = 0;
@@ -948,6 +984,8 @@ pci_save_state(struct pci_dev *dev)
948 return i; 984 return i;
949 if ((i = pci_save_pcix_state(dev)) != 0) 985 if ((i = pci_save_pcix_state(dev)) != 0)
950 return i; 986 return i;
987 if ((i = pci_save_vc_state(dev)) != 0)
988 return i;
951 return 0; 989 return 0;
952} 990}
953 991
@@ -1010,6 +1048,7 @@ void pci_restore_state(struct pci_dev *dev)
1010 /* PCI Express register must be restored first */ 1048 /* PCI Express register must be restored first */
1011 pci_restore_pcie_state(dev); 1049 pci_restore_pcie_state(dev);
1012 pci_restore_ats_state(dev); 1050 pci_restore_ats_state(dev);
1051 pci_restore_vc_state(dev);
1013 1052
1014 pci_restore_config_space(dev); 1053 pci_restore_config_space(dev);
1015 1054
@@ -1087,7 +1126,7 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1087 while (cap->size) { 1126 while (cap->size) {
1088 struct pci_cap_saved_state *tmp; 1127 struct pci_cap_saved_state *tmp;
1089 1128
1090 tmp = pci_find_saved_cap(dev, cap->cap_nr); 1129 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1091 if (!tmp || tmp->cap.size != cap->size) 1130 if (!tmp || tmp->cap.size != cap->size)
1092 return -EINVAL; 1131 return -EINVAL;
1093 1132
@@ -2021,18 +2060,24 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
2021} 2060}
2022 2061
2023/** 2062/**
2024 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers 2063 * _pci_add_cap_save_buffer - allocate buffer for saving given
2064 * capability registers
2025 * @dev: the PCI device 2065 * @dev: the PCI device
2026 * @cap: the capability to allocate the buffer for 2066 * @cap: the capability to allocate the buffer for
2067 * @extended: Standard or Extended capability ID
2027 * @size: requested size of the buffer 2068 * @size: requested size of the buffer
2028 */ 2069 */
2029static int pci_add_cap_save_buffer( 2070static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2030 struct pci_dev *dev, char cap, unsigned int size) 2071 bool extended, unsigned int size)
2031{ 2072{
2032 int pos; 2073 int pos;
2033 struct pci_cap_saved_state *save_state; 2074 struct pci_cap_saved_state *save_state;
2034 2075
2035 pos = pci_find_capability(dev, cap); 2076 if (extended)
2077 pos = pci_find_ext_capability(dev, cap);
2078 else
2079 pos = pci_find_capability(dev, cap);
2080
2036 if (pos <= 0) 2081 if (pos <= 0)
2037 return 0; 2082 return 0;
2038 2083
@@ -2041,12 +2086,23 @@ static int pci_add_cap_save_buffer(
2041 return -ENOMEM; 2086 return -ENOMEM;
2042 2087
2043 save_state->cap.cap_nr = cap; 2088 save_state->cap.cap_nr = cap;
2089 save_state->cap.cap_extended = extended;
2044 save_state->cap.size = size; 2090 save_state->cap.size = size;
2045 pci_add_saved_cap(dev, save_state); 2091 pci_add_saved_cap(dev, save_state);
2046 2092
2047 return 0; 2093 return 0;
2048} 2094}
2049 2095
2096int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2097{
2098 return _pci_add_cap_save_buffer(dev, cap, false, size);
2099}
2100
2101int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2102{
2103 return _pci_add_cap_save_buffer(dev, cap, true, size);
2104}
2105
2050/** 2106/**
2051 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 2107 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2052 * @dev: the PCI device 2108 * @dev: the PCI device
@@ -2065,6 +2121,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2065 if (error) 2121 if (error)
2066 dev_err(&dev->dev, 2122 dev_err(&dev->dev,
2067 "unable to preallocate PCI-X save buffer\n"); 2123 "unable to preallocate PCI-X save buffer\n");
2124
2125 pci_allocate_vc_save_buffers(dev);
2068} 2126}
2069 2127
2070void pci_free_cap_save_buffers(struct pci_dev *dev) 2128void pci_free_cap_save_buffers(struct pci_dev *dev)
@@ -3204,20 +3262,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3204 */ 3262 */
3205int pci_wait_for_pending_transaction(struct pci_dev *dev) 3263int pci_wait_for_pending_transaction(struct pci_dev *dev)
3206{ 3264{
3207 int i; 3265 if (!pci_is_pcie(dev))
3208 u16 status; 3266 return 1;
3209
3210 /* Wait for Transaction Pending bit clean */
3211 for (i = 0; i < 4; i++) {
3212 if (i)
3213 msleep((1 << (i - 1)) * 100);
3214
3215 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3216 if (!(status & PCI_EXP_DEVSTA_TRPND))
3217 return 1;
3218 }
3219 3267
3220 return 0; 3268 return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
3221} 3269}
3222EXPORT_SYMBOL(pci_wait_for_pending_transaction); 3270EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3223 3271
@@ -3244,10 +3292,8 @@ static int pcie_flr(struct pci_dev *dev, int probe)
3244 3292
3245static int pci_af_flr(struct pci_dev *dev, int probe) 3293static int pci_af_flr(struct pci_dev *dev, int probe)
3246{ 3294{
3247 int i;
3248 int pos; 3295 int pos;
3249 u8 cap; 3296 u8 cap;
3250 u8 status;
3251 3297
3252 pos = pci_find_capability(dev, PCI_CAP_ID_AF); 3298 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3253 if (!pos) 3299 if (!pos)
@@ -3261,14 +3307,8 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3261 return 0; 3307 return 0;
3262 3308
3263 /* Wait for Transaction Pending bit clean */ 3309 /* Wait for Transaction Pending bit clean */
3264 for (i = 0; i < 4; i++) { 3310 if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
3265 if (i) 3311 goto clear;
3266 msleep((1 << (i - 1)) * 100);
3267
3268 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3269 if (!(status & PCI_AF_STATUS_TP))
3270 goto clear;
3271 }
3272 3312
3273 dev_err(&dev->dev, "transaction is not cleared; " 3313 dev_err(&dev->dev, "transaction is not cleared; "
3274 "proceeding with reset anyway\n"); 3314 "proceeding with reset anyway\n");
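
The pci.c hunks above introduce pci_wait_for_pending(), a generic poll-until-clear helper that re-reads a config-space status word with exponential back-off (sleeps of roughly 100, 200 and 400 ms) and returns 1 once the masked bits clear, and then rebuild pci_wait_for_pending_transaction() and pci_af_flr() on top of it. A usage sketch under the assumption that the helper takes a raw config-space offset (it calls pci_read_config_word() on the offset as-is), so the example resolves the PCI Express capability base explicitly; only pci_wait_for_pending() comes from this patch, the surrounding function is illustrative:

#include <linux/pci.h>

static int example_wait_for_idle(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return 1;	/* nothing to poll on conventional PCI */

	/* Locate the Device Status register inside the PCIe capability. */
	pos = pci_pcie_cap(dev) + PCI_EXP_DEVSTA;

	/* Returns 1 when the Transaction Pending bit clears, 0 on timeout. */
	return pci_wait_for_pending(dev, pos, PCI_EXP_DEVSTA_TRPND);
}
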
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index cf611ab2193a..4d6991794fa2 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -50,14 +50,37 @@ struct aer_hest_parse_info {
50 int firmware_first; 50 int firmware_first;
51}; 51};
52 52
53static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
54{
55 if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
56 hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
57 hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
58 return 1;
59 return 0;
60}
61
53static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) 62static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
54{ 63{
55 struct aer_hest_parse_info *info = data; 64 struct aer_hest_parse_info *info = data;
56 struct acpi_hest_aer_common *p; 65 struct acpi_hest_aer_common *p;
57 int ff; 66 int ff;
58 67
68 if (!hest_source_is_pcie_aer(hest_hdr))
69 return 0;
70
59 p = (struct acpi_hest_aer_common *)(hest_hdr + 1); 71 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
60 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); 72 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
73
74 /*
75 * If no specific device is supplied, determine whether
76 * FIRMWARE_FIRST is set for *any* PCIe device.
77 */
78 if (!info->pci_dev) {
79 info->firmware_first |= ff;
80 return 0;
81 }
82
83 /* Otherwise, check the specific device */
61 if (p->flags & ACPI_HEST_GLOBAL) { 84 if (p->flags & ACPI_HEST_GLOBAL) {
62 if (hest_match_type(hest_hdr, info->pci_dev)) 85 if (hest_match_type(hest_hdr, info->pci_dev))
63 info->firmware_first = ff; 86 info->firmware_first = ff;
@@ -97,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
97 120
98static bool aer_firmware_first; 121static bool aer_firmware_first;
99 122
100static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
101{
102 struct acpi_hest_aer_common *p;
103
104 if (aer_firmware_first)
105 return 0;
106
107 switch (hest_hdr->type) {
108 case ACPI_HEST_TYPE_AER_ROOT_PORT:
109 case ACPI_HEST_TYPE_AER_ENDPOINT:
110 case ACPI_HEST_TYPE_AER_BRIDGE:
111 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
112 aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
113 default:
114 return 0;
115 }
116}
117
118/** 123/**
119 * aer_acpi_firmware_first - Check if APEI should control AER. 124 * aer_acpi_firmware_first - Check if APEI should control AER.
120 */ 125 */
121bool aer_acpi_firmware_first(void) 126bool aer_acpi_firmware_first(void)
122{ 127{
123 static bool parsed = false; 128 static bool parsed = false;
129 struct aer_hest_parse_info info = {
130 .pci_dev = NULL, /* Check all PCIe devices */
131 .firmware_first = 0,
132 };
124 133
125 if (!parsed) { 134 if (!parsed) {
126 apei_hest_parse(aer_hest_parse_aff, NULL); 135 apei_hest_parse(aer_hest_parse, &info);
136 aer_firmware_first = info.firmware_first;
127 parsed = true; 137 parsed = true;
128 } 138 }
129 return aer_firmware_first; 139 return aer_firmware_first;
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 2c7c9f5f592c..34ff7026440c 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -124,6 +124,21 @@ static const char *aer_agent_string[] = {
124 "Transmitter ID" 124 "Transmitter ID"
125}; 125};
126 126
127static void __print_tlp_header(struct pci_dev *dev,
128 struct aer_header_log_regs *t)
129{
130 unsigned char *tlp = (unsigned char *)&t;
131
132 dev_err(&dev->dev, " TLP Header:"
133 " %02x%02x%02x%02x %02x%02x%02x%02x"
134 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
135 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
136 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
137 *(tlp + 11), *(tlp + 10), *(tlp + 9),
138 *(tlp + 8), *(tlp + 15), *(tlp + 14),
139 *(tlp + 13), *(tlp + 12));
140}
141
127static void __aer_print_error(struct pci_dev *dev, 142static void __aer_print_error(struct pci_dev *dev,
128 struct aer_err_info *info) 143 struct aer_err_info *info)
129{ 144{
@@ -153,48 +168,39 @@ static void __aer_print_error(struct pci_dev *dev,
153 168
154void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) 169void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
155{ 170{
171 int layer, agent;
156 int id = ((dev->bus->number << 8) | dev->devfn); 172 int id = ((dev->bus->number << 8) | dev->devfn);
157 173
158 if (info->status == 0) { 174 if (!info->status) {
159 dev_err(&dev->dev, 175 dev_err(&dev->dev,
160 "PCIe Bus Error: severity=%s, type=Unaccessible, " 176 "PCIe Bus Error: severity=%s, type=Unaccessible, "
161 "id=%04x(Unregistered Agent ID)\n", 177 "id=%04x(Unregistered Agent ID)\n",
162 aer_error_severity_string[info->severity], id); 178 aer_error_severity_string[info->severity], id);
163 } else { 179 goto out;
164 int layer, agent; 180 }
165 181
166 layer = AER_GET_LAYER_ERROR(info->severity, info->status); 182 layer = AER_GET_LAYER_ERROR(info->severity, info->status);
167 agent = AER_GET_AGENT(info->severity, info->status); 183 agent = AER_GET_AGENT(info->severity, info->status);
168 184
169 dev_err(&dev->dev, 185 dev_err(&dev->dev,
170 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 186 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
171 aer_error_severity_string[info->severity], 187 aer_error_severity_string[info->severity],
172 aer_error_layer[layer], id, aer_agent_string[agent]); 188 aer_error_layer[layer], id, aer_agent_string[agent]);
173 189
174 dev_err(&dev->dev, 190 dev_err(&dev->dev,
175 " device [%04x:%04x] error status/mask=%08x/%08x\n", 191 " device [%04x:%04x] error status/mask=%08x/%08x\n",
176 dev->vendor, dev->device, 192 dev->vendor, dev->device,
177 info->status, info->mask); 193 info->status, info->mask);
178 194
179 __aer_print_error(dev, info); 195 __aer_print_error(dev, info);
180
181 if (info->tlp_header_valid) {
182 unsigned char *tlp = (unsigned char *) &info->tlp;
183 dev_err(&dev->dev, " TLP Header:"
184 " %02x%02x%02x%02x %02x%02x%02x%02x"
185 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
186 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
187 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
188 *(tlp + 11), *(tlp + 10), *(tlp + 9),
189 *(tlp + 8), *(tlp + 15), *(tlp + 14),
190 *(tlp + 13), *(tlp + 12));
191 }
192 }
193 196
197 if (info->tlp_header_valid)
198 __print_tlp_header(dev, &info->tlp);
199
200out:
194 if (info->id && info->error_dev_num > 1 && info->id == id) 201 if (info->id && info->error_dev_num > 1 && info->id == id)
195 dev_err(&dev->dev, 202 dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id);
196 " Error of this Agent(%04x) is reported first\n", 203
197 id);
198 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), 204 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
199 info->severity); 205 info->severity);
200} 206}
@@ -228,6 +234,7 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,
228 const char **status_strs; 234 const char **status_strs;
229 235
230 aer_severity = cper_severity_to_aer(cper_severity); 236 aer_severity = cper_severity_to_aer(cper_severity);
237
231 if (aer_severity == AER_CORRECTABLE) { 238 if (aer_severity == AER_CORRECTABLE) {
232 status = aer->cor_status; 239 status = aer->cor_status;
233 mask = aer->cor_mask; 240 mask = aer->cor_mask;
@@ -240,28 +247,22 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,
240 status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); 247 status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
241 tlp_header_valid = status & AER_LOG_TLP_MASKS; 248 tlp_header_valid = status & AER_LOG_TLP_MASKS;
242 } 249 }
250
243 layer = AER_GET_LAYER_ERROR(aer_severity, status); 251 layer = AER_GET_LAYER_ERROR(aer_severity, status);
244 agent = AER_GET_AGENT(aer_severity, status); 252 agent = AER_GET_AGENT(aer_severity, status);
245 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", 253
246 status, mask); 254 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
247 cper_print_bits("", status, status_strs, status_strs_size); 255 cper_print_bits("", status, status_strs, status_strs_size);
248 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", 256 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
249 aer_error_layer[layer], aer_agent_string[agent]); 257 aer_error_layer[layer], aer_agent_string[agent]);
258
250 if (aer_severity != AER_CORRECTABLE) 259 if (aer_severity != AER_CORRECTABLE)
251 dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", 260 dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
252 aer->uncor_severity); 261 aer->uncor_severity);
253 if (tlp_header_valid) { 262
254 const unsigned char *tlp; 263 if (tlp_header_valid)
255 tlp = (const unsigned char *)&aer->header_log; 264 __print_tlp_header(dev, &aer->header_log);
256 dev_err(&dev->dev, "aer_tlp_header:" 265
257 " %02x%02x%02x%02x %02x%02x%02x%02x"
258 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
259 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
260 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
261 *(tlp + 11), *(tlp + 10), *(tlp + 9),
262 *(tlp + 8), *(tlp + 15), *(tlp + 14),
263 *(tlp + 13), *(tlp + 12));
264 }
265 trace_aer_event(dev_name(&dev->dev), (status & ~mask), 266 trace_aer_event(dev_name(&dev->dev), (status & ~mask),
266 aer_severity); 267 aer_severity);
267} 268}
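
[Editor note] The hunk above only factors the 16-byte TLP header dump into the new __print_tlp_header() helper, now shared with cper_print_aer(); the output format is unchanged: each of the four header dwords is printed with its bytes reversed. A stand-alone sketch of that byte ordering, using a made-up header value purely for illustration:

#include <stdio.h>

int main(void)
{
	/* sample TLP header bytes, invented for illustration only */
	unsigned char tlp[16] = {
		0x4a, 0x00, 0x00, 0x00,	/* dword 0 */
		0x00, 0x10, 0x00, 0x0f,	/* dword 1 */
		0x80, 0x00, 0x00, 0x00,	/* dword 2 */
		0x00, 0x00, 0x00, 0x00,	/* dword 3 */
	};

	/* same per-dword byte reversal as __print_tlp_header() */
	printf("TLP Header: %02x%02x%02x%02x %02x%02x%02x%02x"
	       " %02x%02x%02x%02x %02x%02x%02x%02x\n",
	       tlp[3], tlp[2], tlp[1], tlp[0],
	       tlp[7], tlp[6], tlp[5], tlp[4],
	       tlp[11], tlp[10], tlp[9], tlp[8],
	       tlp[15], tlp[14], tlp[13], tlp[12]);
	return 0;
}
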
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 0b6e76604068..ce9d9ae17bfd 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -554,7 +554,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
554 if (pcie_ports_disabled) 554 if (pcie_ports_disabled)
555 return -ENODEV; 555 return -ENODEV;
556 556
557 new->driver.name = (char *)new->name; 557 new->driver.name = new->name;
558 new->driver.bus = &pcie_port_bus_type; 558 new->driver.bus = &pcie_port_bus_type;
559 new->driver.probe = pcie_port_probe_service; 559 new->driver.probe = pcie_port_probe_service;
560 new->driver.remove = pcie_port_remove_service; 560 new->driver.remove = pcie_port_remove_service;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 38e403dddf6e..12ec56c9a913 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1154,6 +1154,18 @@ static void pci_release_capabilities(struct pci_dev *dev)
1154 pci_free_cap_save_buffers(dev); 1154 pci_free_cap_save_buffers(dev);
1155} 1155}
1156 1156
1157static void pci_free_resources(struct pci_dev *dev)
1158{
1159 int i;
1160
1161 pci_cleanup_rom(dev);
1162 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1163 struct resource *res = dev->resource + i;
1164 if (res->parent)
1165 release_resource(res);
1166 }
1167}
1168
1157/** 1169/**
1158 * pci_release_dev - free a pci device structure when all users of it are finished. 1170 * pci_release_dev - free a pci device structure when all users of it are finished.
1159 * @dev: device that's been disconnected 1171 * @dev: device that's been disconnected
@@ -1163,9 +1175,14 @@ static void pci_release_capabilities(struct pci_dev *dev)
1163 */ 1175 */
1164static void pci_release_dev(struct device *dev) 1176static void pci_release_dev(struct device *dev)
1165{ 1177{
1166 struct pci_dev *pci_dev; 1178 struct pci_dev *pci_dev = to_pci_dev(dev);
1179
1180 down_write(&pci_bus_sem);
1181 list_del(&pci_dev->bus_list);
1182 up_write(&pci_bus_sem);
1183
1184 pci_free_resources(pci_dev);
1167 1185
1168 pci_dev = to_pci_dev(dev);
1169 pci_release_capabilities(pci_dev); 1186 pci_release_capabilities(pci_dev);
1170 pci_release_of_node(pci_dev); 1187 pci_release_of_node(pci_dev);
1171 pcibios_release_device(pci_dev); 1188 pcibios_release_device(pci_dev);
@@ -1381,8 +1398,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1381 dev->match_driver = false; 1398 dev->match_driver = false;
1382 ret = device_add(&dev->dev); 1399 ret = device_add(&dev->dev);
1383 WARN_ON(ret < 0); 1400 WARN_ON(ret < 0);
1384
1385 pci_proc_attach_device(dev);
1386} 1401}
1387 1402
1388struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) 1403struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 1576851028db..f452148e6d55 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -3,20 +3,6 @@
3#include <linux/pci-aspm.h> 3#include <linux/pci-aspm.h>
4#include "pci.h" 4#include "pci.h"
5 5
6static void pci_free_resources(struct pci_dev *dev)
7{
8 int i;
9
10 msi_remove_pci_irq_vectors(dev);
11
12 pci_cleanup_rom(dev);
13 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
14 struct resource *res = dev->resource + i;
15 if (res->parent)
16 release_resource(res);
17 }
18}
19
20static void pci_stop_dev(struct pci_dev *dev) 6static void pci_stop_dev(struct pci_dev *dev)
21{ 7{
22 pci_pme_active(dev, false); 8 pci_pme_active(dev, false);
@@ -24,7 +10,7 @@ static void pci_stop_dev(struct pci_dev *dev)
24 if (dev->is_added) { 10 if (dev->is_added) {
25 pci_proc_detach_device(dev); 11 pci_proc_detach_device(dev);
26 pci_remove_sysfs_dev_files(dev); 12 pci_remove_sysfs_dev_files(dev);
27 device_del(&dev->dev); 13 device_release_driver(&dev->dev);
28 dev->is_added = 0; 14 dev->is_added = 0;
29 } 15 }
30 16
@@ -34,11 +20,8 @@ static void pci_stop_dev(struct pci_dev *dev)
34 20
35static void pci_destroy_dev(struct pci_dev *dev) 21static void pci_destroy_dev(struct pci_dev *dev)
36{ 22{
37 down_write(&pci_bus_sem); 23 device_del(&dev->dev);
38 list_del(&dev->bus_list);
39 up_write(&pci_bus_sem);
40 24
41 pci_free_resources(dev);
42 put_device(&dev->dev); 25 put_device(&dev->dev);
43} 26}
44 27
@@ -126,7 +109,7 @@ void pci_stop_root_bus(struct pci_bus *bus)
126 pci_stop_bus_device(child); 109 pci_stop_bus_device(child);
127 110
128 /* stop the host bridge */ 111 /* stop the host bridge */
129 device_del(&host_bridge->dev); 112 device_release_driver(&host_bridge->dev);
130} 113}
131 114
132void pci_remove_root_bus(struct pci_bus *bus) 115void pci_remove_root_bus(struct pci_bus *bus)
@@ -145,5 +128,5 @@ void pci_remove_root_bus(struct pci_bus *bus)
145 host_bridge->bus = NULL; 128 host_bridge->bus = NULL;
146 129
147 /* remove the host bridge */ 130 /* remove the host bridge */
148 put_device(&host_bridge->dev); 131 device_unregister(&host_bridge->dev);
149} 132}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 219a4106480a..2e344a5581ae 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -538,7 +538,8 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
538 struct pci_bus_region region; 538 struct pci_bus_region region;
539 unsigned long io_mask; 539 unsigned long io_mask;
540 u8 io_base_lo, io_limit_lo; 540 u8 io_base_lo, io_limit_lo;
541 u32 l, io_upper16; 541 u16 l;
542 u32 io_upper16;
542 543
543 io_mask = PCI_IO_RANGE_MASK; 544 io_mask = PCI_IO_RANGE_MASK;
544 if (bridge->io_window_1k) 545 if (bridge->io_window_1k)
@@ -548,11 +549,10 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
548 res = bus->resource[0]; 549 res = bus->resource[0];
549 pcibios_resource_to_bus(bridge, &region, res); 550 pcibios_resource_to_bus(bridge, &region, res);
550 if (res->flags & IORESOURCE_IO) { 551 if (res->flags & IORESOURCE_IO) {
551 pci_read_config_dword(bridge, PCI_IO_BASE, &l); 552 pci_read_config_word(bridge, PCI_IO_BASE, &l);
552 l &= 0xffff0000;
553 io_base_lo = (region.start >> 8) & io_mask; 553 io_base_lo = (region.start >> 8) & io_mask;
554 io_limit_lo = (region.end >> 8) & io_mask; 554 io_limit_lo = (region.end >> 8) & io_mask;
555 l |= ((u32) io_limit_lo << 8) | io_base_lo; 555 l = ((u16) io_limit_lo << 8) | io_base_lo;
556 /* Set up upper 16 bits of I/O base/limit. */ 556 /* Set up upper 16 bits of I/O base/limit. */
557 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 557 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
558 dev_info(&bridge->dev, " bridge window %pR\n", res); 558 dev_info(&bridge->dev, " bridge window %pR\n", res);
@@ -564,7 +564,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
564 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ 564 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
565 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); 565 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
566 /* Update lower 16 bits of I/O base/limit. */ 566 /* Update lower 16 bits of I/O base/limit. */
567 pci_write_config_dword(bridge, PCI_IO_BASE, l); 567 pci_write_config_word(bridge, PCI_IO_BASE, l);
568 /* Update upper 16 bits of I/O base/limit. */ 568 /* Update upper 16 bits of I/O base/limit. */
569 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); 569 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
570} 570}
@@ -665,21 +665,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
665 665
666 pci_read_config_word(bridge, PCI_IO_BASE, &io); 666 pci_read_config_word(bridge, PCI_IO_BASE, &io);
667 if (!io) { 667 if (!io) {
668 pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); 668 pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
669 pci_read_config_word(bridge, PCI_IO_BASE, &io); 669 pci_read_config_word(bridge, PCI_IO_BASE, &io);
670 pci_write_config_word(bridge, PCI_IO_BASE, 0x0); 670 pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
671 } 671 }
672 if (io) 672 if (io)
673 b_res[0].flags |= IORESOURCE_IO; 673 b_res[0].flags |= IORESOURCE_IO;
674
674 /* DECchip 21050 pass 2 errata: the bridge may miss an address 675 /* DECchip 21050 pass 2 errata: the bridge may miss an address
675 disconnect boundary by one PCI data phase. 676 disconnect boundary by one PCI data phase.
676 Workaround: do not use prefetching on this device. */ 677 Workaround: do not use prefetching on this device. */
677 if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) 678 if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
678 return; 679 return;
680
679 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); 681 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
680 if (!pmem) { 682 if (!pmem) {
681 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 683 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
682 0xfff0fff0); 684 0xffe0fff0);
683 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); 685 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
684 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); 686 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
685 } 687 }
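
[Editor note] With the hunk above, pci_setup_bridge_io() programs PCI_IO_BASE/PCI_IO_LIMIT as a single 16-bit word instead of a dword read-modify-write, so the value written is just the limit byte packed above the base byte. A stand-alone sketch of that encoding for a hypothetical 0x2000-0x2fff window (standard 16-bit decode, 4K granularity):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0x2000, end = 0x2fff;	/* example I/O window */
	uint8_t io_mask = 0xf0;			/* PCI_IO_RANGE_MASK, 4K granularity */

	uint8_t io_base_lo  = (start >> 8) & io_mask;
	uint8_t io_limit_lo = (end >> 8) & io_mask;
	uint16_t l = ((uint16_t)io_limit_lo << 8) | io_base_lo;

	printf("PCI_IO_BASE word: 0x%04x\n", l);	/* 0x2020 for this window */
	return 0;
}
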
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
new file mode 100644
index 000000000000..7e1304d2e389
--- /dev/null
+++ b/drivers/pci/vc.c
@@ -0,0 +1,434 @@
1/*
2 * PCI Virtual Channel support
3 *
4 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
5 * Author: Alex Williamson <alex.williamson@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/device.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16#include <linux/pci_regs.h>
17#include <linux/types.h>
18
19/**
20 * pci_vc_save_restore_dwords - Save or restore a series of dwords
21 * @dev: device
22 * @pos: starting config space position
23 * @buf: buffer to save to or restore from
24 * @dwords: number of dwords to save/restore
25 * @save: whether to save or restore
26 */
27static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos,
28 u32 *buf, int dwords, bool save)
29{
30 int i;
31
32 for (i = 0; i < dwords; i++, buf++) {
33 if (save)
34 pci_read_config_dword(dev, pos + (i * 4), buf);
35 else
36 pci_write_config_dword(dev, pos + (i * 4), *buf);
37 }
38}
39
40/**
41 * pci_vc_load_arb_table - load and wait for VC arbitration table
42 * @dev: device
43 * @pos: starting position of VC capability (VC/VC9/MFVC)
44 *
45 * Set Load VC Arbitration Table bit requesting hardware to apply the VC
46 * Arbitration Table (previously loaded). When the VC Arbitration Table
47 * Status clears, hardware has latched the table into VC arbitration logic.
48 */
49static void pci_vc_load_arb_table(struct pci_dev *dev, int pos)
50{
51 u16 ctrl;
52
53 pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl);
54 pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
55 ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE);
56 if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS,
57 PCI_VC_PORT_STATUS_TABLE))
58 return;
59
60 dev_err(&dev->dev, "VC arbitration table failed to load\n");
61}
62
63/**
64 * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table
65 * @dev: device
66 * @pos: starting position of VC capability (VC/VC9/MFVC)
67 * @res: VC resource number, ie. VCn (0-7)
68 *
69 * Set Load Port Arbitration Table bit requesting hardware to apply the Port
70 * Arbitration Table (previously loaded). When the Port Arbitration Table
71 * Status clears, hardware has latched the table into port arbitration logic.
72 */
73static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res)
74{
75 int ctrl_pos, status_pos;
76 u32 ctrl;
77
78 ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
79 status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
80
81 pci_read_config_dword(dev, ctrl_pos, &ctrl);
82 pci_write_config_dword(dev, ctrl_pos,
83 ctrl | PCI_VC_RES_CTRL_LOAD_TABLE);
84
85 if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE))
86 return;
87
88 dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res);
89}
90
91/**
92 * pci_vc_enable - Enable virtual channel
93 * @dev: device
94 * @pos: starting position of VC capability (VC/VC9/MFVC)
95 * @res: VC res number, ie. VCn (0-7)
96 *
97 * A VC is enabled by setting the enable bit in matching resource control
98 * registers on both sides of a link. We therefore need to find the opposite
99 * end of the link. To keep this simple we enable from the downstream device.
100 * RC devices do not have an upstream device, nor does it seem that VC9 does
101 * (spec is unclear). Once we find the upstream device, match the VC ID to
102 * get the correct resource, disable and enable on both ends.
103 */
104static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
105{
106 int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2;
107 u32 ctrl, header, cap1, ctrl2;
108 struct pci_dev *link = NULL;
109
110 /* Enable VCs from the downstream device */
111 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
112 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
113 return;
114
115 ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
116 status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
117
118 pci_read_config_dword(dev, ctrl_pos, &ctrl);
119 id = ctrl & PCI_VC_RES_CTRL_ID;
120
121 pci_read_config_dword(dev, pos, &header);
122
123 /* If there is no opposite end of the link, skip to enable */
124 if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 ||
125 pci_is_root_bus(dev->bus))
126 goto enable;
127
128 pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC);
129 if (!pos2)
130 goto enable;
131
132 pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1);
133 evcc = cap1 & PCI_VC_CAP1_EVCC;
134
135 /* VC0 is hardwired enabled, so we can start with 1 */
136 for (i = 1; i < evcc + 1; i++) {
137 ctrl_pos2 = pos2 + PCI_VC_RES_CTRL +
138 (i * PCI_CAP_VC_PER_VC_SIZEOF);
139 status_pos2 = pos2 + PCI_VC_RES_STATUS +
140 (i * PCI_CAP_VC_PER_VC_SIZEOF);
141 pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2);
142 if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) {
143 link = dev->bus->self;
144 break;
145 }
146 }
147
148 if (!link)
149 goto enable;
150
151 /* Disable if enabled */
152 if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) {
153 ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE;
154 pci_write_config_dword(link, ctrl_pos2, ctrl2);
155 }
156
157 /* Enable on both ends */
158 ctrl2 |= PCI_VC_RES_CTRL_ENABLE;
159 pci_write_config_dword(link, ctrl_pos2, ctrl2);
160enable:
161 ctrl |= PCI_VC_RES_CTRL_ENABLE;
162 pci_write_config_dword(dev, ctrl_pos, ctrl);
163
164 if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO))
165 dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id);
166
167 if (link && !pci_wait_for_pending(link, status_pos2,
168 PCI_VC_RES_STATUS_NEGO))
169 dev_err(&link->dev, "VC%d negotiation stuck pending\n", id);
170}
171
172/**
173 * pci_vc_do_save_buffer - Size, save, or restore VC state
174 * @dev: device
175 * @pos: starting position of VC capability (VC/VC9/MFVC)
176 * @save_state: buffer for save/restore
177 * @name: for error message
178 * @save: if provided a buffer, this indicates what to do with it
179 *
180 * Walking Virtual Channel config space to size, save, or restore it
181 * is complicated, so we do it all from one function to reduce code and
182 * guarantee ordering matches in the buffer. When called with NULL
183 * @save_state, return the size of the necessary save buffer. When called
184 * with a non-NULL @save_state, @save determines whether we save to the
185 * buffer or restore from it.
186 */
187static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
188 struct pci_cap_saved_state *save_state,
189 bool save)
190{
191 u32 cap1;
192 char evcc, lpevcc, parb_size;
193 int i, len = 0;
194 u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL;
195
196 /* Sanity check buffer size for save/restore */
197 if (buf && save_state->cap.size !=
198 pci_vc_do_save_buffer(dev, pos, NULL, save)) {
199 dev_err(&dev->dev,
200 "VC save buffer size does not match @0x%x\n", pos);
201 return -ENOMEM;
202 }
203
204 pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1);
205 /* Extended VC Count (not counting VC0) */
206 evcc = cap1 & PCI_VC_CAP1_EVCC;
207 /* Low Priority Extended VC Count (not counting VC0) */
208 lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;
209 /* Port Arbitration Table Entry Size (bits) */
210 parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10);
211
212 /*
213 * Port VC Control Register contains VC Arbitration Select, which
214 * cannot be modified when more than one LPVC is in operation. We
215 * therefore save/restore it first, as only VC0 should be enabled
216 * after device reset.
217 */
218 if (buf) {
219 if (save)
220 pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL,
221 (u16 *)buf);
222 else
223 pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
224 *(u16 *)buf);
225 buf += 2;
226 }
227 len += 2;
228
229 /*
230 * If we have any Low Priority VCs and a VC Arbitration Table Offset
231 * in Port VC Capability Register 2 then save/restore it next.
232 */
233 if (lpevcc) {
234 u32 cap2;
235 int vcarb_offset;
236
237 pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2);
238 vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16;
239
240 if (vcarb_offset) {
241 int size, vcarb_phases = 0;
242
243 if (cap2 & PCI_VC_CAP2_128_PHASE)
244 vcarb_phases = 128;
245 else if (cap2 & PCI_VC_CAP2_64_PHASE)
246 vcarb_phases = 64;
247 else if (cap2 & PCI_VC_CAP2_32_PHASE)
248 vcarb_phases = 32;
249
250 /* Fixed 4 bits per phase per lpevcc (plus VC0) */
251 size = ((lpevcc + 1) * vcarb_phases * 4) / 8;
252
253 if (size && buf) {
254 pci_vc_save_restore_dwords(dev,
255 pos + vcarb_offset,
256 (u32 *)buf,
257 size / 4, save);
258 /*
259 * On restore, we need to signal hardware to
260 * re-load the VC Arbitration Table.
261 */
262 if (!save)
263 pci_vc_load_arb_table(dev, pos);
264
265 buf += size;
266 }
267 len += size;
268 }
269 }
270
271 /*
272 * In addition to each VC Resource Control Register, we may have a
273 * Port Arbitration Table attached to each VC. The Port Arbitration
274 * Table Offset in each VC Resource Capability Register tells us if
275 * it exists. The entry size is global from the Port VC Capability
276 * Register1 above. The number of phases is determined per VC.
277 */
278 for (i = 0; i < evcc + 1; i++) {
279 u32 cap;
280 int parb_offset;
281
282 pci_read_config_dword(dev, pos + PCI_VC_RES_CAP +
283 (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap);
284 parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16;
285 if (parb_offset) {
286 int size, parb_phases = 0;
287
288 if (cap & PCI_VC_RES_CAP_256_PHASE)
289 parb_phases = 256;
290 else if (cap & (PCI_VC_RES_CAP_128_PHASE |
291 PCI_VC_RES_CAP_128_PHASE_TB))
292 parb_phases = 128;
293 else if (cap & PCI_VC_RES_CAP_64_PHASE)
294 parb_phases = 64;
295 else if (cap & PCI_VC_RES_CAP_32_PHASE)
296 parb_phases = 32;
297
298 size = (parb_size * parb_phases) / 8;
299
300 if (size && buf) {
301 pci_vc_save_restore_dwords(dev,
302 pos + parb_offset,
303 (u32 *)buf,
304 size / 4, save);
305 buf += size;
306 }
307 len += size;
308 }
309
310 /* VC Resource Control Register */
311 if (buf) {
312 int ctrl_pos = pos + PCI_VC_RES_CTRL +
313 (i * PCI_CAP_VC_PER_VC_SIZEOF);
314 if (save)
315 pci_read_config_dword(dev, ctrl_pos,
316 (u32 *)buf);
317 else {
318 u32 tmp, ctrl = *(u32 *)buf;
319 /*
320 * For an FLR case, the VC config may remain.
321 * Preserve enable bit, restore the rest.
322 */
323 pci_read_config_dword(dev, ctrl_pos, &tmp);
324 tmp &= PCI_VC_RES_CTRL_ENABLE;
325 tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE;
326 pci_write_config_dword(dev, ctrl_pos, tmp);
327 /* Load port arbitration table if used */
328 if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT)
329 pci_vc_load_port_arb_table(dev, pos, i);
330 /* Re-enable if needed */
331 if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE)
332 pci_vc_enable(dev, pos, i);
333 }
334 buf += 4;
335 }
336 len += 4;
337 }
338
339 return buf ? 0 : len;
340}
341
342static struct {
343 u16 id;
344 const char *name;
345} vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" },
346 { PCI_EXT_CAP_ID_VC, "VC" },
347 { PCI_EXT_CAP_ID_VC9, "VC9" } };
348
349/**
350 * pci_save_vc_state - Save VC state to pre-allocated save buffer
351 * @dev: device
352 *
353 * For each type of VC capability, VC/VC9/MFVC, find the capability and
354 * save it to the pre-allocated save buffer.
355 */
356int pci_save_vc_state(struct pci_dev *dev)
357{
358 int i;
359
360 for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
361 int pos, ret;
362 struct pci_cap_saved_state *save_state;
363
364 pos = pci_find_ext_capability(dev, vc_caps[i].id);
365 if (!pos)
366 continue;
367
368 save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
369 if (!save_state) {
370 dev_err(&dev->dev, "%s buffer not found in %s\n",
371 vc_caps[i].name, __func__);
372 return -ENOMEM;
373 }
374
375 ret = pci_vc_do_save_buffer(dev, pos, save_state, true);
376 if (ret) {
377 dev_err(&dev->dev, "%s save unsuccessful %s\n",
378 vc_caps[i].name, __func__);
379 return ret;
380 }
381 }
382
383 return 0;
384}
385
386/**
387 * pci_restore_vc_state - Restore VC state from save buffer
388 * @dev: device
389 *
390 * For each type of VC capability, VC/VC9/MFVC, find the capability and
391 * restore it from the previously saved buffer.
392 */
393void pci_restore_vc_state(struct pci_dev *dev)
394{
395 int i;
396
397 for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
398 int pos;
399 struct pci_cap_saved_state *save_state;
400
401 pos = pci_find_ext_capability(dev, vc_caps[i].id);
402 save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
403 if (!save_state || !pos)
404 continue;
405
406 pci_vc_do_save_buffer(dev, pos, save_state, false);
407 }
408}
409
410/**
411 * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps
412 * @dev: device
413 *
414 * For each type of VC capability, VC/VC9/MFVC, find the capability, size
415 * it, and allocate a buffer for save/restore.
416 */
417
418void pci_allocate_vc_save_buffers(struct pci_dev *dev)
419{
420 int i;
421
422 for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
423 int len, pos = pci_find_ext_capability(dev, vc_caps[i].id);
424
425 if (!pos)
426 continue;
427
428 len = pci_vc_do_save_buffer(dev, pos, NULL, false);
429 if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len))
430 dev_err(&dev->dev,
431 "unable to preallocate %s save buffer\n",
432 vc_caps[i].name);
433 }
434}
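
[Editor note] Nothing in vc.c shows a caller, but the kernel-doc implies the intended pattern: allocate and size the buffers once, then save before and restore after any operation that clears VC configuration. A hypothetical sketch of a driver doing that around a function reset; the foo_* name and the choice of pci_reset_function() are illustrative, not part of this series, and the sketch assumes pci_allocate_vc_save_buffers() has already run for the device:

#include <linux/pci.h>

static int foo_reset_keeping_vc(struct pci_dev *pdev)
{
	int ret;

	ret = pci_save_vc_state(pdev);		/* snapshot VC/VC9/MFVC config */
	if (ret)
		return ret;

	ret = pci_reset_function(pdev);		/* a reset that clears VC state */

	pci_restore_vc_state(pdev);		/* re-program and re-enable VCs */
	return ret;
}
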
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index ffd0632c3cbc..83cd1574c810 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -975,20 +975,20 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
975 int ret, evcc, phases, vc_arb; 975 int ret, evcc, phases, vc_arb;
976 int len = PCI_CAP_VC_BASE_SIZEOF; 976 int len = PCI_CAP_VC_BASE_SIZEOF;
977 977
978 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp); 978 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
979 if (ret) 979 if (ret)
980 return pcibios_err_to_errno(ret); 980 return pcibios_err_to_errno(ret);
981 981
982 evcc = tmp & PCI_VC_REG1_EVCC; /* extended vc count */ 982 evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
983 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp); 983 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
984 if (ret) 984 if (ret)
985 return pcibios_err_to_errno(ret); 985 return pcibios_err_to_errno(ret);
986 986
987 if (tmp & PCI_VC_REG2_128_PHASE) 987 if (tmp & PCI_VC_CAP2_128_PHASE)
988 phases = 128; 988 phases = 128;
989 else if (tmp & PCI_VC_REG2_64_PHASE) 989 else if (tmp & PCI_VC_CAP2_64_PHASE)
990 phases = 64; 990 phases = 64;
991 else if (tmp & PCI_VC_REG2_32_PHASE) 991 else if (tmp & PCI_VC_CAP2_32_PHASE)
992 phases = 32; 992 phases = 32;
993 else 993 else
994 phases = 0; 994 phases = 0;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
198extern size_t vmcoreinfo_size; 198extern size_t vmcoreinfo_size;
199extern size_t vmcoreinfo_max_size; 199extern size_t vmcoreinfo_max_size;
200 200
201/* flag to track if kexec reboot is in progress */
202extern bool kexec_in_progress;
203
201int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 204int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
202 unsigned long long *crash_size, unsigned long long *crash_base); 205 unsigned long long *crash_size, unsigned long long *crash_base);
203int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, 206int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 009b02481436..92a2f991262a 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -60,10 +60,10 @@ void arch_teardown_msi_irq(unsigned int irq);
60int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 60int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
61void arch_teardown_msi_irqs(struct pci_dev *dev); 61void arch_teardown_msi_irqs(struct pci_dev *dev);
62int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); 62int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
63void arch_restore_msi_irqs(struct pci_dev *dev, int irq); 63void arch_restore_msi_irqs(struct pci_dev *dev);
64 64
65void default_teardown_msi_irqs(struct pci_dev *dev); 65void default_teardown_msi_irqs(struct pci_dev *dev);
66void default_restore_msi_irqs(struct pci_dev *dev, int irq); 66void default_restore_msi_irqs(struct pci_dev *dev);
67u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 67u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
68u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); 68u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
69 69
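
[Editor note] Both restore hooks lose the irq argument, so an architecture override now operates on the whole device. A sketch of a trivial override matching the new prototype; falling back to default_restore_msi_irqs() is an assumption for illustration, not code from this series:

#include <linux/msi.h>
#include <linux/pci.h>

void arch_restore_msi_irqs(struct pci_dev *dev)
{
	/* arch-specific MSI remapping work would go here */
	default_restore_msi_irqs(dev);		/* write back the cached messages */
}
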
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1084a15175e0..f16fb1f01317 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -224,7 +224,8 @@ enum pci_bus_speed {
224}; 224};
225 225
226struct pci_cap_saved_data { 226struct pci_cap_saved_data {
227 char cap_nr; 227 u16 cap_nr;
228 bool cap_extended;
228 unsigned int size; 229 unsigned int size;
229 u32 data[0]; 230 u32 data[0];
230}; 231};
@@ -938,6 +939,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev);
938void pci_msi_off(struct pci_dev *dev); 939void pci_msi_off(struct pci_dev *dev);
939int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); 940int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
940int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); 941int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
942int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
941int pci_wait_for_pending_transaction(struct pci_dev *dev); 943int pci_wait_for_pending_transaction(struct pci_dev *dev);
942int pcix_get_max_mmrbc(struct pci_dev *dev); 944int pcix_get_max_mmrbc(struct pci_dev *dev);
943int pcix_get_mmrbc(struct pci_dev *dev); 945int pcix_get_mmrbc(struct pci_dev *dev);
@@ -976,6 +978,12 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
976int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); 978int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state);
977int pci_load_and_free_saved_state(struct pci_dev *dev, 979int pci_load_and_free_saved_state(struct pci_dev *dev,
978 struct pci_saved_state **state); 980 struct pci_saved_state **state);
981struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
982struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
983 u16 cap);
984int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
985int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
986 u16 cap, unsigned int size);
979int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); 987int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
980int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 988int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
981pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 989pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@@ -997,6 +1005,11 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
997 return __pci_enable_wake(dev, state, false, enable); 1005 return __pci_enable_wake(dev, state, false, enable);
998} 1006}
999 1007
1008/* PCI Virtual Channel */
1009int pci_save_vc_state(struct pci_dev *dev);
1010void pci_restore_vc_state(struct pci_dev *dev);
1011void pci_allocate_vc_save_buffers(struct pci_dev *dev);
1012
1000#define PCI_EXP_IDO_REQUEST (1<<0) 1013#define PCI_EXP_IDO_REQUEST (1<<0)
1001#define PCI_EXP_IDO_COMPLETION (1<<1) 1014#define PCI_EXP_IDO_COMPLETION (1<<1)
1002void pci_enable_ido(struct pci_dev *dev, unsigned long type); 1015void pci_enable_ido(struct pci_dev *dev, unsigned long type);
@@ -1567,65 +1580,65 @@ enum pci_fixup_pass {
1567/* Anonymous variables would be nice... */ 1580/* Anonymous variables would be nice... */
1568#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 1581#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
1569 class_shift, hook) \ 1582 class_shift, hook) \
1570 static const struct pci_fixup __pci_fixup_##name __used \ 1583 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
1571 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 1584 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
1572 = { vendor, device, class, class_shift, hook }; 1585 = { vendor, device, class, class_shift, hook };
1573 1586
1574#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 1587#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
1575 class_shift, hook) \ 1588 class_shift, hook) \
1576 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1589 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1577 vendor##device##hook, vendor, device, class, class_shift, hook) 1590 hook, vendor, device, class, class_shift, hook)
1578#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 1591#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
1579 class_shift, hook) \ 1592 class_shift, hook) \
1580 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1593 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1581 vendor##device##hook, vendor, device, class, class_shift, hook) 1594 hook, vendor, device, class, class_shift, hook)
1582#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 1595#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
1583 class_shift, hook) \ 1596 class_shift, hook) \
1584 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1597 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1585 vendor##device##hook, vendor, device, class, class_shift, hook) 1598 hook, vendor, device, class, class_shift, hook)
1586#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 1599#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
1587 class_shift, hook) \ 1600 class_shift, hook) \
1588 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1601 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1589 vendor##device##hook, vendor, device, class, class_shift, hook) 1602 hook, vendor, device, class, class_shift, hook)
1590#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1603#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1591 class_shift, hook) \ 1604 class_shift, hook) \
1592 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1605 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1593 resume##vendor##device##hook, vendor, device, class, \ 1606 resume##hook, vendor, device, class, \
1594 class_shift, hook) 1607 class_shift, hook)
1595#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1608#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1596 class_shift, hook) \ 1609 class_shift, hook) \
1597 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1610 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1598 resume_early##vendor##device##hook, vendor, device, \ 1611 resume_early##hook, vendor, device, \
1599 class, class_shift, hook) 1612 class, class_shift, hook)
1600#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1613#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1601 class_shift, hook) \ 1614 class_shift, hook) \
1602 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1615 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1603 suspend##vendor##device##hook, vendor, device, class, \ 1616 suspend##hook, vendor, device, class, \
1604 class_shift, hook) 1617 class_shift, hook)
1605 1618
1606#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1619#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1607 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1620 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1608 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1621 hook, vendor, device, PCI_ANY_ID, 0, hook)
1609#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 1622#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1610 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1623 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1611 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1624 hook, vendor, device, PCI_ANY_ID, 0, hook)
1612#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 1625#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1613 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1626 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1614 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1627 hook, vendor, device, PCI_ANY_ID, 0, hook)
1615#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 1628#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1616 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1629 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1617 vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) 1630 hook, vendor, device, PCI_ANY_ID, 0, hook)
1618#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1631#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1619 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1632 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1620 resume##vendor##device##hook, vendor, device, \ 1633 resume##hook, vendor, device, \
1621 PCI_ANY_ID, 0, hook) 1634 PCI_ANY_ID, 0, hook)
1622#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1635#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1623 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1636 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1624 resume_early##vendor##device##hook, vendor, device, \ 1637 resume_early##hook, vendor, device, \
1625 PCI_ANY_ID, 0, hook) 1638 PCI_ANY_ID, 0, hook)
1626#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1639#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1627 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1640 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1628 suspend##vendor##device##hook, vendor, device, \ 1641 suspend##hook, vendor, device, \
1629 PCI_ANY_ID, 0, hook) 1642 PCI_ANY_ID, 0, hook)
1630 1643
1631#ifdef CONFIG_PCI_QUIRKS 1644#ifdef CONFIG_PCI_QUIRKS
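
[Editor note] The fixup declaration macros now build the variable name from the hook plus __LINE__ instead of pasting vendor##device##hook, so the vendor/device arguments no longer have to be single paste-able tokens and repeated declarations of one hook get distinct symbols. An illustrative quirk using the macro (the 0x1234 device ID and the foo naming are made up):

#include <linux/pci.h>

static void quirk_foo_disable_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;
	dev_info(&dev->dev, "MSI disabled by quirk\n");
}
/* expands to a struct pci_fixup named __pci_fixup_quirk_foo_disable_msi<line> */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_foo_disable_msi);
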
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a98e85438a7..ab6b4e7f6657 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -518,8 +518,16 @@
518#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ 518#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
519#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ 519#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
520#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ 520#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
521#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */
522#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
523#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */
521#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ 524#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */
525#define PCI_EXP_SLTCTL_PWR_IND_ON 0x0100 /* Power Indicator on */
526#define PCI_EXP_SLTCTL_PWR_IND_BLINK 0x0200 /* Power Indicator blinking */
527#define PCI_EXP_SLTCTL_PWR_IND_OFF 0x0300 /* Power Indicator off */
522#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ 528#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */
529#define PCI_EXP_SLTCTL_PWR_ON 0x0000 /* Power On */
530#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
523#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ 531#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
524#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ 532#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
525#define PCI_EXP_SLTSTA 26 /* Slot Status */ 533#define PCI_EXP_SLTSTA 26 /* Slot Status */
@@ -677,17 +685,34 @@
677#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ 685#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
678 686
679/* Virtual Channel */ 687/* Virtual Channel */
680#define PCI_VC_PORT_REG1 4 688#define PCI_VC_PORT_CAP1 4
681#define PCI_VC_REG1_EVCC 0x7 /* extended VC count */ 689#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
682#define PCI_VC_PORT_REG2 8 690#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
683#define PCI_VC_REG2_32_PHASE 0x2 691#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
684#define PCI_VC_REG2_64_PHASE 0x4 692#define PCI_VC_PORT_CAP2 8
685#define PCI_VC_REG2_128_PHASE 0x8 693#define PCI_VC_CAP2_32_PHASE 0x00000002
694#define PCI_VC_CAP2_64_PHASE 0x00000004
695#define PCI_VC_CAP2_128_PHASE 0x00000008
696#define PCI_VC_CAP2_ARB_OFF 0xff000000
686#define PCI_VC_PORT_CTRL 12 697#define PCI_VC_PORT_CTRL 12
698#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
687#define PCI_VC_PORT_STATUS 14 699#define PCI_VC_PORT_STATUS 14
700#define PCI_VC_PORT_STATUS_TABLE 0x00000001
688#define PCI_VC_RES_CAP 16 701#define PCI_VC_RES_CAP 16
702#define PCI_VC_RES_CAP_32_PHASE 0x00000002
703#define PCI_VC_RES_CAP_64_PHASE 0x00000004
704#define PCI_VC_RES_CAP_128_PHASE 0x00000008
705#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
706#define PCI_VC_RES_CAP_256_PHASE 0x00000020
707#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
689#define PCI_VC_RES_CTRL 20 708#define PCI_VC_RES_CTRL 20
709#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
710#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
711#define PCI_VC_RES_CTRL_ID 0x07000000
712#define PCI_VC_RES_CTRL_ENABLE 0x80000000
690#define PCI_VC_RES_STATUS 26 713#define PCI_VC_RES_STATUS 26
714#define PCI_VC_RES_STATUS_TABLE 0x00000001
715#define PCI_VC_RES_STATUS_NEGO 0x00000002
691#define PCI_CAP_VC_BASE_SIZEOF 0x10 716#define PCI_CAP_VC_BASE_SIZEOF 0x10
692#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C 717#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
693 718
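
[Editor note] The new PCI_EXP_SLTCTL_* values name the attention/power indicator and power controller encodings that hotplug code previously open-coded. A hedged sketch of using them with the generic capability accessors to set the Power Indicator to blinking (the foo_* wrapper, and the assumption that pdev is a hotplug-capable port, are illustrative):

#include <linux/pci.h>

static void foo_blink_power_indicator(struct pci_dev *pdev)
{
	/* clear the Power Indicator Control field, then set "blink" */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_SLTCTL,
					   PCI_EXP_SLTCTL_PIC,
					   PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
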
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..d0d8fca54065 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47size_t vmcoreinfo_size; 47size_t vmcoreinfo_size;
48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); 48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
49 49
50/* Flag to indicate we are going to kexec a new kernel */
51bool kexec_in_progress = false;
52
50/* Location of the reserved area for the crash kernel */ 53/* Location of the reserved area for the crash kernel */
51struct resource crashk_res = { 54struct resource crashk_res = {
52 .name = "Crash kernel", 55 .name = "Crash kernel",
@@ -1675,6 +1678,7 @@ int kernel_kexec(void)
1675 } else 1678 } else
1676#endif 1679#endif
1677 { 1680 {
1681 kexec_in_progress = true;
1678 kernel_restart_prepare(NULL); 1682 kernel_restart_prepare(NULL);
1679 printk(KERN_EMERG "Starting new kernel\n"); 1683 printk(KERN_EMERG "Starting new kernel\n");
1680 machine_shutdown(); 1684 machine_shutdown();
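
[Editor note] kexec_in_progress is set just before machine_shutdown(), so shutdown paths can test it to skip work that only matters for a full power cycle. A hypothetical consumer (foo_shutdown() is made up; it assumes CONFIG_KEXEC so the flag is available):

#include <linux/kexec.h>
#include <linux/pci.h>

static void foo_shutdown(struct pci_dev *pdev)
{
	if (kexec_in_progress)
		return;			/* the new kernel re-initializes the device */

	/* lengthy, orderly power-down only needed for a real reboot */
}
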
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..5690b8eabfbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2840,19 +2840,6 @@ already_gone:
2840 return false; 2840 return false;
2841} 2841}
2842 2842
2843static bool __flush_work(struct work_struct *work)
2844{
2845 struct wq_barrier barr;
2846
2847 if (start_flush_work(work, &barr)) {
2848 wait_for_completion(&barr.done);
2849 destroy_work_on_stack(&barr.work);
2850 return true;
2851 } else {
2852 return false;
2853 }
2854}
2855
2856/** 2843/**
2857 * flush_work - wait for a work to finish executing the last queueing instance 2844 * flush_work - wait for a work to finish executing the last queueing instance
2858 * @work: the work to flush 2845 * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
2866 */ 2853 */
2867bool flush_work(struct work_struct *work) 2854bool flush_work(struct work_struct *work)
2868{ 2855{
2856 struct wq_barrier barr;
2857
2869 lock_map_acquire(&work->lockdep_map); 2858 lock_map_acquire(&work->lockdep_map);
2870 lock_map_release(&work->lockdep_map); 2859 lock_map_release(&work->lockdep_map);
2871 2860
2872 return __flush_work(work); 2861 if (start_flush_work(work, &barr)) {
2862 wait_for_completion(&barr.done);
2863 destroy_work_on_stack(&barr.work);
2864 return true;
2865 } else {
2866 return false;
2867 }
2873} 2868}
2874EXPORT_SYMBOL_GPL(flush_work); 2869EXPORT_SYMBOL_GPL(flush_work);
2875 2870
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4814 4809
4815 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4810 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4816 schedule_work_on(cpu, &wfc.work); 4811 schedule_work_on(cpu, &wfc.work);
4817 4812 flush_work(&wfc.work);
4818 /*
4819 * The work item is on-stack and can't lead to deadlock through
4820 * flushing. Use __flush_work() to avoid spurious lockdep warnings
4821 * when work_on_cpu()s are nested.
4822 */
4823 __flush_work(&wfc.work);
4824
4825 return wfc.ret; 4813 return wfc.ret;
4826} 4814}
4827EXPORT_SYMBOL_GPL(work_on_cpu); 4815EXPORT_SYMBOL_GPL(work_on_cpu);
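
[Editor note] With __flush_work() folded back into flush_work(), work_on_cpu() goes through the ordinary flush path for its on-stack work item. The same pattern in isolation, as a hedged sketch with made-up foo_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_on_stack {
	struct work_struct work;
	int result;
};

static void foo_work_fn(struct work_struct *work)
{
	struct foo_on_stack *f = container_of(work, struct foo_on_stack, work);

	f->result = 42;			/* the real payload would run here */
}

static int foo_run_on_cpu(int cpu)
{
	struct foo_on_stack f = { .result = 0 };

	INIT_WORK_ONSTACK(&f.work, foo_work_fn);
	schedule_work_on(cpu, &f.work);
	flush_work(&f.work);		/* waits for foo_work_fn() to finish */
	destroy_work_on_stack(&f.work);	/* pairs with INIT_WORK_ONSTACK() */
	return f.result;
}
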