summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt22
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt60
-rw-r--r--Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt58
-rw-r--r--MAINTAINERS11
-rw-r--r--arch/alpha/kernel/console.c1
-rw-r--r--arch/alpha/kernel/pci.c2
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/arm/include/asm/pci.h5
-rw-r--r--arch/arm/kernel/bios32.c2
-rw-r--r--arch/arm/mach-mvebu/Kconfig1
-rw-r--r--arch/mips/jazz/setup.c8
-rw-r--r--arch/mips/mti-malta/malta-setup.c10
-rw-r--r--arch/powerpc/kernel/pci-common.c10
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c20
-rw-r--r--arch/powerpc/kernel/pci_32.c3
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c7
-rw-r--r--arch/powerpc/platforms/maple/time.c2
-rw-r--r--arch/powerpc/platforms/powermac/feature.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c6
-rw-r--r--arch/powerpc/sysdev/i8259.c6
-rw-r--r--arch/powerpc/sysdev/mv64x60_pci.c4
-rw-r--r--arch/x86/pci/irq.c3
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/ata/pata_ali.c3
-rw-r--r--drivers/block/DAC960.c38
-rw-r--r--drivers/block/DAC960.h4
-rw-r--r--drivers/char/agp/nvidia-agp.c12
-rw-r--r--drivers/char/agp/sworks-agp.c3
-rw-r--r--drivers/firmware/edd.c8
-rw-r--r--drivers/firmware/iscsi_ibft.c5
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c16
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c4
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c10
-rw-r--r--drivers/ide/sl82c105.c5
-rw-r--r--drivers/infiniband/hw/qedr/main.c59
-rw-r--r--drivers/iommu/amd_iommu.c3
-rw-r--r--drivers/iommu/amd_iommu_init.c9
-rw-r--r--drivers/iommu/amd_iommu_v2.c3
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c18
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h2
-rw-r--r--drivers/net/ethernet/intel/e100.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c6
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c36
-rw-r--r--drivers/of/Kconfig16
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/address.c9
-rw-r--r--drivers/of/of_pci.c384
-rw-r--r--drivers/of/of_pci_irq.c131
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/Makefile14
-rw-r--r--drivers/pci/access.c8
-rw-r--r--drivers/pci/bus.c4
-rw-r--r--drivers/pci/cadence/Kconfig27
-rw-r--r--drivers/pci/cadence/Makefile4
-rw-r--r--drivers/pci/cadence/pcie-cadence-ep.c542
-rw-r--r--drivers/pci/cadence/pcie-cadence-host.c336
-rw-r--r--drivers/pci/cadence/pcie-cadence.c126
-rw-r--r--drivers/pci/cadence/pcie-cadence.h311
-rw-r--r--drivers/pci/dwc/Kconfig68
-rw-r--r--drivers/pci/dwc/Makefile6
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c42
-rw-r--r--drivers/pci/dwc/pci-exynos.c222
-rw-r--r--drivers/pci/dwc/pci-keystone.c5
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c470
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c127
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c15
-rw-r--r--drivers/pci/dwc/pcie-designware.c2
-rw-r--r--drivers/pci/dwc/pcie-designware.h36
-rw-r--r--drivers/pci/dwc/pcie-qcom.c4
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c41
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c65
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c72
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c4
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-host-common.c74
-rw-r--r--drivers/pci/host/pci-tegra.c156
-rw-r--r--drivers/pci/host/pci-versatile.c2
-rw-r--r--drivers/pci/host/pci-xgene.c1
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c7
-rw-r--r--drivers/pci/host/pcie-iproc.c8
-rw-r--r--drivers/pci/host/pcie-iproc.h2
-rw-r--r--drivers/pci/host/pcie-rcar.c7
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c3
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c18
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c20
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c60
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c38
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c12
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c12
-rw-r--r--drivers/pci/hotplug/pnv_php.c39
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c36
-rw-r--r--drivers/pci/hotplug/shpchp.h8
-rw-r--r--drivers/pci/hotplug/shpchp_core.c5
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c12
-rw-r--r--drivers/pci/iov.c10
-rw-r--r--drivers/pci/irq.c7
-rw-r--r--drivers/pci/msi.c7
-rw-r--r--drivers/pci/of.c565
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-stub.c2
-rw-r--r--drivers/pci/pci-sysfs.c30
-rw-r--r--drivers/pci/pci.c157
-rw-r--r--drivers/pci/pci.h74
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c15
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c26
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c22
-rw-r--r--drivers/pci/pcie/aspm.c80
-rw-r--r--drivers/pci/pcie/pcie-dpc.c251
-rw-r--r--drivers/pci/pcie/pme.c6
-rw-r--r--drivers/pci/pcie/portdrv_core.c4
-rw-r--r--drivers/pci/pcie/ptm.c2
-rw-r--r--drivers/pci/probe.c371
-rw-r--r--drivers/pci/quirks.c210
-rw-r--r--drivers/pci/rom.c8
-rw-r--r--drivers/pci/setup-bus.c48
-rw-r--r--drivers/pci/setup-irq.c4
-rw-r--r--drivers/pci/setup-res.c40
-rw-r--r--drivers/pci/switch/switchtec.c3
-rw-r--r--drivers/pci/syscall.c4
-rw-r--r--drivers/pci/vc.c18
-rw-r--r--drivers/pci/xen-pcifront.c22
-rw-r--r--drivers/sbus/char/openprom.c5
-rw-r--r--drivers/video/console/vgacon.c34
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.c4
-rw-r--r--drivers/video/fbdev/nvidia/nv_hw.c11
-rw-r--r--drivers/video/fbdev/nvidia/nv_setup.c3
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/riva/nv_driver.c7
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c20
-rw-r--r--drivers/video/fbdev/riva/riva_hw.h3
-rw-r--r--include/linux/of_pci.h8
-rw-r--r--include/linux/pci-aspm.h35
-rw-r--r--include/linux/pci-dma-compat.h27
-rw-r--r--include/linux/pci-epc.h43
-rw-r--r--include/linux/pci.h441
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/switchtec.h3
-rw-r--r--include/uapi/linux/pci_regs.h30
-rw-r--r--include/uapi/linux/switchtec_ioctl.h3
-rw-r--r--kernel/resource.c29
158 files changed, 4350 insertions, 2596 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6571fbfdb2a1..78cdf6a637fc 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3675,7 +3675,11 @@
3675 [KNL, SMP] Set scheduler's default relax_domain_level. 3675 [KNL, SMP] Set scheduler's default relax_domain_level.
3676 See Documentation/cgroup-v1/cpusets.txt. 3676 See Documentation/cgroup-v1/cpusets.txt.
3677 3677
3678 reserve= [KNL,BUGS] Force the kernel to ignore some iomem area 3678 reserve= [KNL,BUGS] Force kernel to ignore I/O ports or memory
3679 Format: <base1>,<size1>[,<base2>,<size2>,...]
3680 Reserve I/O ports or memory so the kernel won't use
3681 them. If <base> is less than 0x10000, the region
3682 is assumed to be I/O ports; otherwise it is memory.
3679 3683
3680 reservetop= [X86-32] 3684 reservetop= [X86-32]
3681 Format: nn[KMG] 3685 Format: nn[KMG]
diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
index 4e4aee4439ea..979dc7b6cfe8 100644
--- a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
@@ -4,7 +4,10 @@ This PCIe host controller is based on the Synopsys DesignWare PCIe IP
4and thus inherits all the common properties defined in designware-pcie.txt. 4and thus inherits all the common properties defined in designware-pcie.txt.
5 5
6Required properties: 6Required properties:
7- compatible: "axis,artpec6-pcie", "snps,dw-pcie" 7- compatible: "axis,artpec6-pcie", "snps,dw-pcie" for ARTPEC-6 in RC mode;
8 "axis,artpec6-pcie-ep", "snps,dw-pcie" for ARTPEC-6 in EP mode;
9 "axis,artpec7-pcie", "snps,dw-pcie" for ARTPEC-7 in RC mode;
10 "axis,artpec7-pcie-ep", "snps,dw-pcie" for ARTPEC-7 in EP mode;
8- reg: base addresses and lengths of the PCIe controller (DBI), 11- reg: base addresses and lengths of the PCIe controller (DBI),
9 the PHY controller, and configuration address space. 12 the PHY controller, and configuration address space.
10- reg-names: Must include the following entries: 13- reg-names: Must include the following entries:
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
new file mode 100644
index 000000000000..9a305237fa6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
@@ -0,0 +1,22 @@
1* Cadence PCIe endpoint controller
2
3Required properties:
4- compatible: Should contain "cdns,cdns-pcie-ep" to identify the IP used.
5- reg: Should contain the controller register base address and AXI interface
6 region base address respectively.
7- reg-names: Must be "reg" and "mem" respectively.
8- cdns,max-outbound-regions: Set to maximum number of outbound regions
9
10Optional properties:
11- max-functions: Maximum number of functions that can be configured (default 1).
12
13Example:
14
15pcie@fc000000 {
16 compatible = "cdns,cdns-pcie-ep";
17 reg = <0x0 0xfc000000 0x0 0x01000000>,
18 <0x0 0x80000000 0x0 0x40000000>;
19 reg-names = "reg", "mem";
20 cdns,max-outbound-regions = <16>;
21 max-functions = /bits/ 8 <8>;
22};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
new file mode 100644
index 000000000000..20a33f38f69d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
@@ -0,0 +1,60 @@
1* Cadence PCIe host controller
2
3This PCIe controller inherits the base properties defined in
4host-generic-pci.txt.
5
6Required properties:
7- compatible: Should contain "cdns,cdns-pcie-host" to identify the IP used.
8- reg: Should contain the controller register base address, PCIe configuration
9 window base address, and AXI interface region base address respectively.
10- reg-names: Must be "reg", "cfg" and "mem" respectively.
11- #address-cells: Set to <3>
12- #size-cells: Set to <2>
13- device_type: Set to "pci"
14- ranges: Ranges for the PCI memory and I/O regions
15- #interrupt-cells: Set to <1>
16- interrupt-map-mask and interrupt-map: Standard PCI properties to define the
17 mapping of the PCIe interface to interrupt numbers.
18
19Optional properties:
20- cdns,max-outbound-regions: Set to maximum number of outbound regions
21 (default 32)
22- cdns,no-bar-match-nbits: Set into the no BAR match register to configure the
23 number of least significant bits kept during inbound (PCIe -> AXI) address
24 translations (default 32)
25- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
26- device-id: The PCI device ID (16 bits, default is design dependent)
27
28Example:
29
30pcie@fb000000 {
31 compatible = "cdns,cdns-pcie-host";
32 device_type = "pci";
33 #address-cells = <3>;
34 #size-cells = <2>;
35 bus-range = <0x0 0xff>;
36 linux,pci-domain = <0>;
37 cdns,max-outbound-regions = <16>;
38 cdns,no-bar-match-nbits = <32>;
39 vendor-id = /bits/ 16 <0x17cd>;
40 device-id = /bits/ 16 <0x0200>;
41
42 reg = <0x0 0xfb000000 0x0 0x01000000>,
43 <0x0 0x41000000 0x0 0x00001000>,
44 <0x0 0x40000000 0x0 0x04000000>;
45 reg-names = "reg", "cfg", "mem";
46
47 ranges = <0x02000000 0x0 0x42000000 0x0 0x42000000 0x0 0x1000000>,
48 <0x01000000 0x0 0x43000000 0x0 0x43000000 0x0 0x0010000>;
49
50 #interrupt-cells = <0x1>;
51
52 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 14 0x1
53 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 15 0x1
54 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 16 0x1
55 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 17 0x1>;
56
57 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
58
59 msi-parent = <&its_pci>;
60};
diff --git a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt
index 34a11bfbfb60..651d957d1051 100644
--- a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt
@@ -6,9 +6,6 @@ and thus inherits all the common properties defined in designware-pcie.txt.
6Required properties: 6Required properties:
7- compatible: "samsung,exynos5440-pcie" 7- compatible: "samsung,exynos5440-pcie"
8- reg: base addresses and lengths of the PCIe controller, 8- reg: base addresses and lengths of the PCIe controller,
9 the PHY controller, additional register for the PHY controller.
10 (Registers for the PHY controller are DEPRECATED.
11 Use the PHY framework.)
12- reg-names : First name should be set to "elbi". 9- reg-names : First name should be set to "elbi".
13 And use the "config" instead of getting the configuration address space 10 And use the "config" instead of getting the configuration address space
14 from "ranges". 11 from "ranges".
@@ -23,49 +20,8 @@ For other common properties, refer to
23 20
24Example: 21Example:
25 22
26SoC-specific DT Entry: 23SoC-specific DT Entry (with using PHY framework):
27 24
28 pcie@290000 {
29 compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
30 reg = <0x290000 0x1000
31 0x270000 0x1000
32 0x271000 0x40>;
33 interrupts = <0 20 0>, <0 21 0>, <0 22 0>;
34 clocks = <&clock 28>, <&clock 27>;
35 clock-names = "pcie", "pcie_bus";
36 #address-cells = <3>;
37 #size-cells = <2>;
38 device_type = "pci";
39 ranges = <0x00000800 0 0x40000000 0x40000000 0 0x00001000 /* configuration space */
40 0x81000000 0 0 0x40001000 0 0x00010000 /* downstream I/O */
41 0x82000000 0 0x40011000 0x40011000 0 0x1ffef000>; /* non-prefetchable memory */
42 #interrupt-cells = <1>;
43 interrupt-map-mask = <0 0 0 0>;
44 interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
45 num-lanes = <4>;
46 };
47
48 pcie@2a0000 {
49 compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
50 reg = <0x2a0000 0x1000
51 0x272000 0x1000
52 0x271040 0x40>;
53 interrupts = <0 23 0>, <0 24 0>, <0 25 0>;
54 clocks = <&clock 29>, <&clock 27>;
55 clock-names = "pcie", "pcie_bus";
56 #address-cells = <3>;
57 #size-cells = <2>;
58 device_type = "pci";
59 ranges = <0x00000800 0 0x60000000 0x60000000 0 0x00001000 /* configuration space */
60 0x81000000 0 0 0x60001000 0 0x00010000 /* downstream I/O */
61 0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>; /* non-prefetchable memory */
62 #interrupt-cells = <1>;
63 interrupt-map-mask = <0 0 0 0>;
64 interrupt-map = <0 0 0 0 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
65 num-lanes = <4>;
66 };
67
68With using PHY framework:
69 pcie_phy0: pcie-phy@270000 { 25 pcie_phy0: pcie-phy@270000 {
70 ... 26 ...
71 reg = <0x270000 0x1000>, <0x271000 0x40>; 27 reg = <0x270000 0x1000>, <0x271000 0x40>;
@@ -74,13 +30,21 @@ With using PHY framework:
74 }; 30 };
75 31
76 pcie@290000 { 32 pcie@290000 {
77 ... 33 compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
78 reg = <0x290000 0x1000>, <0x40000000 0x1000>; 34 reg = <0x290000 0x1000>, <0x40000000 0x1000>;
79 reg-names = "elbi", "config"; 35 reg-names = "elbi", "config";
36 clocks = <&clock 28>, <&clock 27>;
37 clock-names = "pcie", "pcie_bus";
38 #address-cells = <3>;
39 #size-cells = <2>;
40 device_type = "pci";
80 phys = <&pcie_phy0>; 41 phys = <&pcie_phy0>;
81 ranges = <0x81000000 0 0 0x60001000 0 0x00010000 42 ranges = <0x81000000 0 0 0x60001000 0 0x00010000
82 0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>; 43 0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>;
83 ... 44 #interrupt-cells = <1>;
45 interrupt-map-mask = <0 0 0 0>;
46 interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
47 num-lanes = <4>;
84 }; 48 };
85 49
86Board-specific DT Entry: 50Board-specific DT Entry:
diff --git a/MAINTAINERS b/MAINTAINERS
index a6e86e20761e..cc24c74a946e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10402,6 +10402,13 @@ S: Maintained
10402F: Documentation/devicetree/bindings/pci/pci-armada8k.txt 10402F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
10403F: drivers/pci/dwc/pcie-armada8k.c 10403F: drivers/pci/dwc/pcie-armada8k.c
10404 10404
10405PCI DRIVER FOR CADENCE PCIE IP
10406M: Alan Douglas <adouglas@cadence.com>
10407L: linux-pci@vger.kernel.org
10408S: Maintained
10409F: Documentation/devicetree/bindings/pci/cdns,*.txt
10410F: drivers/pci/cadence/pcie-cadence*
10411
10405PCI DRIVER FOR FREESCALE LAYERSCAPE 10412PCI DRIVER FOR FREESCALE LAYERSCAPE
10406M: Minghuan Lian <minghuan.Lian@freescale.com> 10413M: Minghuan Lian <minghuan.Lian@freescale.com>
10407M: Mingkai Hu <mingkai.hu@freescale.com> 10414M: Mingkai Hu <mingkai.hu@freescale.com>
@@ -10552,8 +10559,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
10552S: Supported 10559S: Supported
10553F: Documentation/devicetree/bindings/pci/ 10560F: Documentation/devicetree/bindings/pci/
10554F: Documentation/PCI/ 10561F: Documentation/PCI/
10562F: drivers/acpi/pci*
10555F: drivers/pci/ 10563F: drivers/pci/
10564F: include/asm-generic/pci*
10556F: include/linux/pci* 10565F: include/linux/pci*
10566F: include/uapi/linux/pci*
10567F: lib/pci*
10557F: arch/x86/pci/ 10568F: arch/x86/pci/
10558F: arch/x86/kernel/quirks.c 10569F: arch/x86/kernel/quirks.c
10559 10570
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
index 8e9a41966881..5476279329a6 100644
--- a/arch/alpha/kernel/console.c
+++ b/arch/alpha/kernel/console.c
@@ -21,6 +21,7 @@
21struct pci_controller *pci_vga_hose; 21struct pci_controller *pci_vga_hose;
22static struct resource alpha_vga = { 22static struct resource alpha_vga = {
23 .name = "alpha-vga+", 23 .name = "alpha-vga+",
24 .flags = IORESOURCE_IO,
24 .start = 0x3C0, 25 .start = 0x3C0,
25 .end = 0x3DF 26 .end = 0x3DF
26}; 27};
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 87da00579946..2e86ebb680ae 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -425,7 +425,7 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
425 if (bus == 0 && dfn == 0) { 425 if (bus == 0 && dfn == 0) {
426 hose = pci_isa_hose; 426 hose = pci_isa_hose;
427 } else { 427 } else {
428 dev = pci_get_bus_and_slot(bus, dfn); 428 dev = pci_get_domain_bus_and_slot(0, bus, dfn);
429 if (!dev) 429 if (!dev)
430 return -ENODEV; 430 return -ENODEV;
431 hose = dev->sysdata; 431 hose = dev->sysdata;
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 239dc0e601d5..ff4f54b86c7f 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -237,7 +237,7 @@ nautilus_init_pci(void)
237 bus = hose->bus = bridge->bus; 237 bus = hose->bus = bridge->bus;
238 pcibios_claim_one_bus(bus); 238 pcibios_claim_one_bus(bus);
239 239
240 irongate = pci_get_bus_and_slot(0, 0); 240 irongate = pci_get_domain_bus_and_slot(pci_domain_nr(bus), 0, 0);
241 bus->self = irongate; 241 bus->self = irongate;
242 bus->resource[0] = &irongate_io; 242 bus->resource[0] = &irongate_io;
243 bus->resource[1] = &irongate_mem; 243 bus->resource[1] = &irongate_mem;
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 960d9dc4f380..1f0de808d111 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -10,10 +10,7 @@ extern unsigned long pcibios_min_io;
10extern unsigned long pcibios_min_mem; 10extern unsigned long pcibios_min_mem;
11#define PCIBIOS_MIN_MEM pcibios_min_mem 11#define PCIBIOS_MIN_MEM pcibios_min_mem
12 12
13static inline int pcibios_assign_all_busses(void) 13#define pcibios_assign_all_busses() pci_has_flag(PCI_REASSIGN_ALL_BUS)
14{
15 return pci_has_flag(PCI_REASSIGN_ALL_RSRC);
16}
17 14
18#ifdef CONFIG_PCI_DOMAINS 15#ifdef CONFIG_PCI_DOMAINS
19static inline int pci_proc_domain(struct pci_bus *bus) 16static inline int pci_proc_domain(struct pci_bus *bus)
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 0cd0aefb3a8f..ed46ca69813d 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -527,7 +527,7 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
527 struct pci_sys_data *sys; 527 struct pci_sys_data *sys;
528 LIST_HEAD(head); 528 LIST_HEAD(head);
529 529
530 pci_add_flags(PCI_REASSIGN_ALL_RSRC); 530 pci_add_flags(PCI_REASSIGN_ALL_BUS);
531 if (hw->preinit) 531 if (hw->preinit)
532 hw->preinit(); 532 hw->preinit();
533 pcibios_init_hw(parent, hw, &head); 533 pcibios_init_hw(parent, hw, &head);
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9b49867154bf..6b32dc527edc 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -10,7 +10,6 @@ menuconfig ARCH_MVEBU
10 select ZONE_DMA if ARM_LPAE 10 select ZONE_DMA if ARM_LPAE
11 select GPIOLIB 11 select GPIOLIB
12 select PCI_QUIRKS if PCI 12 select PCI_QUIRKS if PCI
13 select OF_ADDRESS_PCI
14 13
15if ARCH_MVEBU 14if ARCH_MVEBU
16 15
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index e4374a5651ce..448fd41792e4 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -32,22 +32,22 @@ static struct resource jazz_io_resources[] = {
32 .start = 0x00, 32 .start = 0x00,
33 .end = 0x1f, 33 .end = 0x1f,
34 .name = "dma1", 34 .name = "dma1",
35 .flags = IORESOURCE_BUSY 35 .flags = IORESOURCE_IO | IORESOURCE_BUSY
36 }, { 36 }, {
37 .start = 0x40, 37 .start = 0x40,
38 .end = 0x5f, 38 .end = 0x5f,
39 .name = "timer", 39 .name = "timer",
40 .flags = IORESOURCE_BUSY 40 .flags = IORESOURCE_IO | IORESOURCE_BUSY
41 }, { 41 }, {
42 .start = 0x80, 42 .start = 0x80,
43 .end = 0x8f, 43 .end = 0x8f,
44 .name = "dma page reg", 44 .name = "dma page reg",
45 .flags = IORESOURCE_BUSY 45 .flags = IORESOURCE_IO | IORESOURCE_BUSY
46 }, { 46 }, {
47 .start = 0xc0, 47 .start = 0xc0,
48 .end = 0xdf, 48 .end = 0xdf,
49 .name = "dma2", 49 .name = "dma2",
50 .flags = IORESOURCE_BUSY 50 .flags = IORESOURCE_IO | IORESOURCE_BUSY
51 } 51 }
52}; 52};
53 53
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index de34adb76157..7b63914d2e58 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -47,31 +47,31 @@ static struct resource standard_io_resources[] = {
47 .name = "dma1", 47 .name = "dma1",
48 .start = 0x00, 48 .start = 0x00,
49 .end = 0x1f, 49 .end = 0x1f,
50 .flags = IORESOURCE_BUSY 50 .flags = IORESOURCE_IO | IORESOURCE_BUSY
51 }, 51 },
52 { 52 {
53 .name = "timer", 53 .name = "timer",
54 .start = 0x40, 54 .start = 0x40,
55 .end = 0x5f, 55 .end = 0x5f,
56 .flags = IORESOURCE_BUSY 56 .flags = IORESOURCE_IO | IORESOURCE_BUSY
57 }, 57 },
58 { 58 {
59 .name = "keyboard", 59 .name = "keyboard",
60 .start = 0x60, 60 .start = 0x60,
61 .end = 0x6f, 61 .end = 0x6f,
62 .flags = IORESOURCE_BUSY 62 .flags = IORESOURCE_IO | IORESOURCE_BUSY
63 }, 63 },
64 { 64 {
65 .name = "dma page reg", 65 .name = "dma page reg",
66 .start = 0x80, 66 .start = 0x80,
67 .end = 0x8f, 67 .end = 0x8f,
68 .flags = IORESOURCE_BUSY 68 .flags = IORESOURCE_IO | IORESOURCE_BUSY
69 }, 69 },
70 { 70 {
71 .name = "dma2", 71 .name = "dma2",
72 .start = 0xc0, 72 .start = 0xc0,
73 .end = 0xdf, 73 .end = 0xdf,
74 .flags = IORESOURCE_BUSY 74 .flags = IORESOURCE_IO | IORESOURCE_BUSY
75 }, 75 },
76}; 76};
77 77
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0ac7aa346c69..344af823c3c4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -339,8 +339,7 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
339 */ 339 */
340static int pci_read_irq_line(struct pci_dev *pci_dev) 340static int pci_read_irq_line(struct pci_dev *pci_dev)
341{ 341{
342 struct of_phandle_args oirq; 342 unsigned int virq = 0;
343 unsigned int virq;
344 343
345 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); 344 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
346 345
@@ -348,7 +347,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
348 memset(&oirq, 0xff, sizeof(oirq)); 347 memset(&oirq, 0xff, sizeof(oirq));
349#endif 348#endif
350 /* Try to get a mapping from the device-tree */ 349 /* Try to get a mapping from the device-tree */
351 if (of_irq_parse_pci(pci_dev, &oirq)) { 350 if (!of_irq_parse_and_map_pci(pci_dev, 0, 0)) {
352 u8 line, pin; 351 u8 line, pin;
353 352
354 /* If that fails, lets fallback to what is in the config 353 /* If that fails, lets fallback to what is in the config
@@ -372,11 +371,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
372 virq = irq_create_mapping(NULL, line); 371 virq = irq_create_mapping(NULL, line);
373 if (virq) 372 if (virq)
374 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 373 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
375 } else {
376 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %pOF\n",
377 oirq.args_count, oirq.args[0], oirq.args[1], oirq.np);
378
379 virq = irq_create_of_mapping(&oirq);
380 } 374 }
381 375
382 if (!virq) { 376 if (!virq) {
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 2d71269e7dc1..cf47b1aec4c2 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -104,7 +104,7 @@ EXPORT_SYMBOL_GPL(pci_hp_remove_devices);
104 */ 104 */
105void pci_hp_add_devices(struct pci_bus *bus) 105void pci_hp_add_devices(struct pci_bus *bus)
106{ 106{
107 int slotno, mode, pass, max; 107 int slotno, mode, max;
108 struct pci_dev *dev; 108 struct pci_dev *dev;
109 struct pci_controller *phb; 109 struct pci_controller *phb;
110 struct device_node *dn = pci_bus_to_OF_node(bus); 110 struct device_node *dn = pci_bus_to_OF_node(bus);
@@ -133,13 +133,17 @@ void pci_hp_add_devices(struct pci_bus *bus)
133 pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); 133 pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
134 pcibios_setup_bus_devices(bus); 134 pcibios_setup_bus_devices(bus);
135 max = bus->busn_res.start; 135 max = bus->busn_res.start;
136 for (pass = 0; pass < 2; pass++) { 136 /*
137 list_for_each_entry(dev, &bus->devices, bus_list) { 137 * Scan bridges that are already configured. We don't touch
138 if (pci_is_bridge(dev)) 138 * them unless they are misconfigured (which will be done in
139 max = pci_scan_bridge(bus, dev, 139 * the second scan below).
140 max, pass); 140 */
141 } 141 for_each_pci_bridge(dev, bus)
142 } 142 max = pci_scan_bridge(bus, dev, max, 0);
143
144 /* Scan bridges that need to be reconfigured */
145 for_each_pci_bridge(dev, bus)
146 max = pci_scan_bridge(bus, dev, max, 1);
143 } 147 }
144 pcibios_finish_adding_to_bus(bus); 148 pcibios_finish_adding_to_bus(bus);
145} 149}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1d817f4d97d9..85ad2f78b889 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -96,7 +96,8 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
96 reg = of_get_property(node, "reg", NULL); 96 reg = of_get_property(node, "reg", NULL);
97 if (!reg) 97 if (!reg)
98 continue; 98 continue;
99 dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); 99 dev = pci_get_domain_bus_and_slot(0, pci_bus,
100 ((reg[0] >> 8) & 0xff));
100 if (!dev || !dev->subordinate) { 101 if (!dev || !dev->subordinate) {
101 pci_dev_put(dev); 102 pci_dev_put(dev);
102 continue; 103 continue;
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 0d790f8432d2..8bdaa2a6fa62 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -369,11 +369,8 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
369 pcibios_setup_bus_devices(bus); 369 pcibios_setup_bus_devices(bus);
370 370
371 /* Now scan child busses */ 371 /* Now scan child busses */
372 list_for_each_entry(dev, &bus->devices, bus_list) { 372 for_each_pci_bridge(dev, bus)
373 if (pci_is_bridge(dev)) { 373 of_scan_pci_bridge(dev);
374 of_scan_pci_bridge(dev);
375 }
376 }
377} 374}
378 375
379/** 376/**
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index 81799d70a1ee..cfddc87f81bf 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -134,7 +134,7 @@ int maple_set_rtc_time(struct rtc_time *tm)
134 134
135static struct resource rtc_iores = { 135static struct resource rtc_iores = {
136 .name = "rtc", 136 .name = "rtc",
137 .flags = IORESOURCE_BUSY, 137 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
138}; 138};
139 139
140unsigned long __init maple_get_boot_time(void) 140unsigned long __init maple_get_boot_time(void)
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 9e3f39d36e88..ed8b16694153 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -829,7 +829,7 @@ core99_ata100_enable(struct device_node *node, long value)
829 829
830 if (value) { 830 if (value) {
831 if (pci_device_from_OF_node(node, &pbus, &pid) == 0) 831 if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
832 pdev = pci_get_bus_and_slot(pbus, pid); 832 pdev = pci_get_domain_bus_and_slot(0, pbus, pid);
833 if (pdev == NULL) 833 if (pdev == NULL)
834 return 0; 834 return 0;
835 rc = pci_enable_device(pdev); 835 rc = pci_enable_device(pdev);
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 4650fb294e7a..2f7cd0ef3cdc 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1654,14 +1654,14 @@ static int pnv_eeh_restore_vf_config(struct pci_dn *pdn)
1654 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, 1654 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1655 2, devctl); 1655 2, devctl);
1656 1656
1657 /* Disable Completion Timeout */ 1657 /* Disable Completion Timeout if possible */
1658 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2, 1658 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
1659 4, &cap2); 1659 4, &cap2);
1660 if (cap2 & 0x10) { 1660 if (cap2 & PCI_EXP_DEVCAP2_COMP_TMOUT_DIS) {
1661 eeh_ops->read_config(pdn, 1661 eeh_ops->read_config(pdn,
1662 edev->pcie_cap + PCI_EXP_DEVCTL2, 1662 edev->pcie_cap + PCI_EXP_DEVCTL2,
1663 4, &cap2); 1663 4, &cap2);
1664 cap2 |= 0x10; 1664 cap2 |= PCI_EXP_DEVCTL2_COMP_TMOUT_DIS;
1665 eeh_ops->write_config(pdn, 1665 eeh_ops->write_config(pdn,
1666 edev->pcie_cap + PCI_EXP_DEVCTL2, 1666 edev->pcie_cap + PCI_EXP_DEVCTL2,
1667 4, cap2); 1667 4, cap2);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index bafb014e1a7e..cb9a8b71fd0f 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -145,21 +145,21 @@ static struct resource pic1_iores = {
145 .name = "8259 (master)", 145 .name = "8259 (master)",
146 .start = 0x20, 146 .start = 0x20,
147 .end = 0x21, 147 .end = 0x21,
148 .flags = IORESOURCE_BUSY, 148 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
149}; 149};
150 150
151static struct resource pic2_iores = { 151static struct resource pic2_iores = {
152 .name = "8259 (slave)", 152 .name = "8259 (slave)",
153 .start = 0xa0, 153 .start = 0xa0,
154 .end = 0xa1, 154 .end = 0xa1,
155 .flags = IORESOURCE_BUSY, 155 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
156}; 156};
157 157
158static struct resource pic_edgectrl_iores = { 158static struct resource pic_edgectrl_iores = {
159 .name = "8259 edge control", 159 .name = "8259 edge control",
160 .start = 0x4d0, 160 .start = 0x4d0,
161 .end = 0x4d1, 161 .end = 0x4d1,
162 .flags = IORESOURCE_BUSY, 162 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
163}; 163};
164 164
165static int i8259_host_match(struct irq_domain *h, struct device_node *node, 165static int i8259_host_match(struct irq_domain *h, struct device_node *node,
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index d52b3b81e05f..6fe9104632a0 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -37,7 +37,7 @@ static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj,
37 if (count < MV64X60_VAL_LEN_MAX) 37 if (count < MV64X60_VAL_LEN_MAX)
38 return -EINVAL; 38 return -EINVAL;
39 39
40 phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 40 phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
41 if (!phb) 41 if (!phb)
42 return -ENODEV; 42 return -ENODEV;
43 pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v); 43 pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v);
@@ -61,7 +61,7 @@ static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
61 if (sscanf(buf, "%i", &v) != 1) 61 if (sscanf(buf, "%i", &v) != 1)
62 return -EINVAL; 62 return -EINVAL;
63 63
64 phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 64 phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
65 if (!phb) 65 if (!phb)
66 return -ENODEV; 66 return -ENODEV;
67 pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v); 67 pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v);
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 0452629148be..52e55108404e 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -839,7 +839,8 @@ static void __init pirq_find_router(struct irq_router *r)
839 DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n", 839 DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n",
840 rt->rtr_vendor, rt->rtr_device); 840 rt->rtr_vendor, rt->rtr_device);
841 841
842 pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); 842 pirq_router_dev = pci_get_domain_bus_and_slot(0, rt->rtr_bus,
843 rt->rtr_devfn);
843 if (!pirq_router_dev) { 844 if (!pirq_router_dev) {
844 DBG(KERN_DEBUG "PCI: Interrupt router not found at " 845 DBG(KERN_DEBUG "PCI: Interrupt router not found at "
845 "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); 846 "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c4b3646bd04c..9542a746dc50 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -409,10 +409,8 @@ int __init pci_xen_init(void)
409 pcibios_enable_irq = xen_pcifront_enable_irq; 409 pcibios_enable_irq = xen_pcifront_enable_irq;
410 pcibios_disable_irq = NULL; 410 pcibios_disable_irq = NULL;
411 411
412#ifdef CONFIG_ACPI
413 /* Keep ACPI out of the picture */ 412 /* Keep ACPI out of the picture */
414 acpi_noirq = 1; 413 acpi_noirq_set();
415#endif
416 414
417#ifdef CONFIG_PCI_MSI 415#ifdef CONFIG_PCI_MSI
418 x86_msi.setup_msi_irqs = xen_setup_msi_irqs; 416 x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
diff --git a/drivers/Makefile b/drivers/Makefile
index e06f7f633f73..8189b1edec00 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -16,10 +16,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
16obj-$(CONFIG_GPIOLIB) += gpio/ 16obj-$(CONFIG_GPIOLIB) += gpio/
17obj-y += pwm/ 17obj-y += pwm/
18 18
19obj-$(CONFIG_PCI) += pci/ 19obj-y += pci/
20obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
21# PCI dwc controller drivers
22obj-y += pci/dwc/
23 20
24obj-$(CONFIG_PARISC) += parisc/ 21obj-$(CONFIG_PARISC) += parisc/
25obj-$(CONFIG_RAPIDIO) += rapidio/ 22obj-$(CONFIG_RAPIDIO) += rapidio/
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index d19cd88ed2d3..0b122f903b8a 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -466,7 +466,8 @@ static void ali_init_chipset(struct pci_dev *pdev)
466 tmp |= 0x01; /* CD_ROM enable for DMA */ 466 tmp |= 0x01; /* CD_ROM enable for DMA */
467 pci_write_config_byte(pdev, 0x53, tmp); 467 pci_write_config_byte(pdev, 0x53, tmp);
468 } 468 }
469 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); 469 north = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0,
470 PCI_DEVFN(0, 0));
470 if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) { 471 if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
471 /* Configure the ALi bridge logic. For non ALi rely on BIOS. 472 /* Configure the ALi bridge logic. For non ALi rely on BIOS.
472 Set the south bridge enable bit */ 473 Set the south bridge enable bit */
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 442e777bdfb2..a7dbd184af86 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -268,17 +268,17 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
268 void *AllocationPointer = NULL; 268 void *AllocationPointer = NULL;
269 void *ScatterGatherCPU = NULL; 269 void *ScatterGatherCPU = NULL;
270 dma_addr_t ScatterGatherDMA; 270 dma_addr_t ScatterGatherDMA;
271 struct pci_pool *ScatterGatherPool; 271 struct dma_pool *ScatterGatherPool;
272 void *RequestSenseCPU = NULL; 272 void *RequestSenseCPU = NULL;
273 dma_addr_t RequestSenseDMA; 273 dma_addr_t RequestSenseDMA;
274 struct pci_pool *RequestSensePool = NULL; 274 struct dma_pool *RequestSensePool = NULL;
275 275
276 if (Controller->FirmwareType == DAC960_V1_Controller) 276 if (Controller->FirmwareType == DAC960_V1_Controller)
277 { 277 {
278 CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker); 278 CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
279 CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize; 279 CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize;
280 ScatterGatherPool = pci_pool_create("DAC960_V1_ScatterGather", 280 ScatterGatherPool = dma_pool_create("DAC960_V1_ScatterGather",
281 Controller->PCIDevice, 281 &Controller->PCIDevice->dev,
282 DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T), 282 DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T),
283 sizeof(DAC960_V1_ScatterGatherSegment_T), 0); 283 sizeof(DAC960_V1_ScatterGatherSegment_T), 0);
284 if (ScatterGatherPool == NULL) 284 if (ScatterGatherPool == NULL)
@@ -290,18 +290,18 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
290 { 290 {
291 CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker); 291 CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker);
292 CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize; 292 CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize;
293 ScatterGatherPool = pci_pool_create("DAC960_V2_ScatterGather", 293 ScatterGatherPool = dma_pool_create("DAC960_V2_ScatterGather",
294 Controller->PCIDevice, 294 &Controller->PCIDevice->dev,
295 DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T), 295 DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T),
296 sizeof(DAC960_V2_ScatterGatherSegment_T), 0); 296 sizeof(DAC960_V2_ScatterGatherSegment_T), 0);
297 if (ScatterGatherPool == NULL) 297 if (ScatterGatherPool == NULL)
298 return DAC960_Failure(Controller, 298 return DAC960_Failure(Controller,
299 "AUXILIARY STRUCTURE CREATION (SG)"); 299 "AUXILIARY STRUCTURE CREATION (SG)");
300 RequestSensePool = pci_pool_create("DAC960_V2_RequestSense", 300 RequestSensePool = dma_pool_create("DAC960_V2_RequestSense",
301 Controller->PCIDevice, sizeof(DAC960_SCSI_RequestSense_T), 301 &Controller->PCIDevice->dev, sizeof(DAC960_SCSI_RequestSense_T),
302 sizeof(int), 0); 302 sizeof(int), 0);
303 if (RequestSensePool == NULL) { 303 if (RequestSensePool == NULL) {
304 pci_pool_destroy(ScatterGatherPool); 304 dma_pool_destroy(ScatterGatherPool);
305 return DAC960_Failure(Controller, 305 return DAC960_Failure(Controller,
306 "AUXILIARY STRUCTURE CREATION (SG)"); 306 "AUXILIARY STRUCTURE CREATION (SG)");
307 } 307 }
@@ -335,16 +335,16 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
335 Command->Next = Controller->FreeCommands; 335 Command->Next = Controller->FreeCommands;
336 Controller->FreeCommands = Command; 336 Controller->FreeCommands = Command;
337 Controller->Commands[CommandIdentifier-1] = Command; 337 Controller->Commands[CommandIdentifier-1] = Command;
338 ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC, 338 ScatterGatherCPU = dma_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
339 &ScatterGatherDMA); 339 &ScatterGatherDMA);
340 if (ScatterGatherCPU == NULL) 340 if (ScatterGatherCPU == NULL)
341 return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION"); 341 return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
342 342
343 if (RequestSensePool != NULL) { 343 if (RequestSensePool != NULL) {
344 RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC, 344 RequestSenseCPU = dma_pool_alloc(RequestSensePool, GFP_ATOMIC,
345 &RequestSenseDMA); 345 &RequestSenseDMA);
346 if (RequestSenseCPU == NULL) { 346 if (RequestSenseCPU == NULL) {
347 pci_pool_free(ScatterGatherPool, ScatterGatherCPU, 347 dma_pool_free(ScatterGatherPool, ScatterGatherCPU,
348 ScatterGatherDMA); 348 ScatterGatherDMA);
349 return DAC960_Failure(Controller, 349 return DAC960_Failure(Controller,
350 "AUXILIARY STRUCTURE CREATION"); 350 "AUXILIARY STRUCTURE CREATION");
@@ -379,8 +379,8 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
379static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller) 379static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
380{ 380{
381 int i; 381 int i;
382 struct pci_pool *ScatterGatherPool = Controller->ScatterGatherPool; 382 struct dma_pool *ScatterGatherPool = Controller->ScatterGatherPool;
383 struct pci_pool *RequestSensePool = NULL; 383 struct dma_pool *RequestSensePool = NULL;
384 void *ScatterGatherCPU; 384 void *ScatterGatherCPU;
385 dma_addr_t ScatterGatherDMA; 385 dma_addr_t ScatterGatherDMA;
386 void *RequestSenseCPU; 386 void *RequestSenseCPU;
@@ -411,9 +411,9 @@ static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
411 RequestSenseDMA = Command->V2.RequestSenseDMA; 411 RequestSenseDMA = Command->V2.RequestSenseDMA;
412 } 412 }
413 if (ScatterGatherCPU != NULL) 413 if (ScatterGatherCPU != NULL)
414 pci_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA); 414 dma_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
415 if (RequestSenseCPU != NULL) 415 if (RequestSenseCPU != NULL)
416 pci_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA); 416 dma_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);
417 417
418 if ((Command->CommandIdentifier 418 if ((Command->CommandIdentifier
419 % Controller->CommandAllocationGroupSize) == 1) { 419 % Controller->CommandAllocationGroupSize) == 1) {
@@ -437,13 +437,11 @@ static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
437 Controller->CurrentStatusBuffer = NULL; 437 Controller->CurrentStatusBuffer = NULL;
438 } 438 }
439 439
440 if (ScatterGatherPool != NULL) 440 dma_pool_destroy(ScatterGatherPool);
441 pci_pool_destroy(ScatterGatherPool);
442 if (Controller->FirmwareType == DAC960_V1_Controller) 441 if (Controller->FirmwareType == DAC960_V1_Controller)
443 return; 442 return;
444 443
445 if (RequestSensePool != NULL) 444 dma_pool_destroy(RequestSensePool);
446 pci_pool_destroy(RequestSensePool);
447 445
448 for (i = 0; i < DAC960_MaxLogicalDrives; i++) { 446 for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
449 kfree(Controller->V2.LogicalDeviceInformation[i]); 447 kfree(Controller->V2.LogicalDeviceInformation[i]);
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
index 6a6226a2b932..21aff470d268 100644
--- a/drivers/block/DAC960.h
+++ b/drivers/block/DAC960.h
@@ -2316,7 +2316,7 @@ typedef struct DAC960_Controller
2316 bool SuppressEnclosureMessages; 2316 bool SuppressEnclosureMessages;
2317 struct timer_list MonitoringTimer; 2317 struct timer_list MonitoringTimer;
2318 struct gendisk *disks[DAC960_MaxLogicalDrives]; 2318 struct gendisk *disks[DAC960_MaxLogicalDrives];
2319 struct pci_pool *ScatterGatherPool; 2319 struct dma_pool *ScatterGatherPool;
2320 DAC960_Command_T *FreeCommands; 2320 DAC960_Command_T *FreeCommands;
2321 unsigned char *CombinedStatusBuffer; 2321 unsigned char *CombinedStatusBuffer;
2322 unsigned char *CurrentStatusBuffer; 2322 unsigned char *CurrentStatusBuffer;
@@ -2429,7 +2429,7 @@ typedef struct DAC960_Controller
2429 bool NeedDeviceSerialNumberInformation; 2429 bool NeedDeviceSerialNumberInformation;
2430 bool StartLogicalDeviceInformationScan; 2430 bool StartLogicalDeviceInformationScan;
2431 bool StartPhysicalDeviceInformationScan; 2431 bool StartPhysicalDeviceInformationScan;
2432 struct pci_pool *RequestSensePool; 2432 struct dma_pool *RequestSensePool;
2433 2433
2434 dma_addr_t FirstCommandMailboxDMA; 2434 dma_addr_t FirstCommandMailboxDMA;
2435 DAC960_V2_CommandMailbox_T *FirstCommandMailbox; 2435 DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 828b34445203..623205bcd04a 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -340,11 +340,17 @@ static int agp_nvidia_probe(struct pci_dev *pdev,
340 u8 cap_ptr; 340 u8 cap_ptr;
341 341
342 nvidia_private.dev_1 = 342 nvidia_private.dev_1 =
343 pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); 343 pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
344 (unsigned int)pdev->bus->number,
345 PCI_DEVFN(0, 1));
344 nvidia_private.dev_2 = 346 nvidia_private.dev_2 =
345 pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2)); 347 pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
348 (unsigned int)pdev->bus->number,
349 PCI_DEVFN(0, 2));
346 nvidia_private.dev_3 = 350 nvidia_private.dev_3 =
347 pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0)); 351 pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
352 (unsigned int)pdev->bus->number,
353 PCI_DEVFN(30, 0));
348 354
349 if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) { 355 if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
350 printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 " 356 printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 03be4ac79b0d..4dbdd3bc9bb8 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -474,7 +474,8 @@ static int agp_serverworks_probe(struct pci_dev *pdev,
474 } 474 }
475 475
476 /* Everything is on func 1 here so we are hardcoding function one */ 476 /* Everything is on func 1 here so we are hardcoding function one */
477 bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, 477 bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
478 (unsigned int)pdev->bus->number,
478 PCI_DEVFN(0, 1)); 479 PCI_DEVFN(0, 1));
479 if (!bridge_dev) { 480 if (!bridge_dev) {
480 dev_info(&pdev->dev, "can't find secondary device\n"); 481 dev_info(&pdev->dev, "can't find secondary device\n");
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index e22957665808..60a8f1363a10 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -669,10 +669,10 @@ edd_get_pci_dev(struct edd_device *edev)
669 struct edd_info *info = edd_dev_get_info(edev); 669 struct edd_info *info = edd_dev_get_info(edev);
670 670
671 if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) { 671 if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
672 return pci_get_bus_and_slot(info->params.interface_path.pci.bus, 672 return pci_get_domain_bus_and_slot(0,
673 PCI_DEVFN(info->params.interface_path.pci.slot, 673 info->params.interface_path.pci.bus,
674 info->params.interface_path.pci. 674 PCI_DEVFN(info->params.interface_path.pci.slot,
675 function)); 675 info->params.interface_path.pci.function));
676 } 676 }
677 return NULL; 677 return NULL;
678} 678}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 14042a64bdd5..6bc8e6640d71 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -719,8 +719,9 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
719 * executes only devices which are in domain 0. Furthermore, the 719 * executes only devices which are in domain 0. Furthermore, the
720 * iBFT spec doesn't have a domain id field :-( 720 * iBFT spec doesn't have a domain id field :-(
721 */ 721 */
722 pci_dev = pci_get_bus_and_slot((nic->pci_bdf & 0xff00) >> 8, 722 pci_dev = pci_get_domain_bus_and_slot(0,
723 (nic->pci_bdf & 0xff)); 723 (nic->pci_bdf & 0xff00) >> 8,
724 (nic->pci_bdf & 0xff));
724 if (pci_dev) { 725 if (pci_dev) {
725 rc = sysfs_create_link(&boot_kobj->kobj, 726 rc = sysfs_create_link(&boot_kobj->kobj,
726 &pci_dev->dev.kobj, "device"); 727 &pci_dev->dev.kobj, "device");
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 8745971a7680..3a3bf752e03a 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -185,21 +185,22 @@ static int cdv_backlight_init(struct drm_device *dev)
185 * for this and the MID devices. 185 * for this and the MID devices.
186 */ 186 */
187 187
188static inline u32 CDV_MSG_READ32(uint port, uint offset) 188static inline u32 CDV_MSG_READ32(int domain, uint port, uint offset)
189{ 189{
190 int mcr = (0x10<<24) | (port << 16) | (offset << 8); 190 int mcr = (0x10<<24) | (port << 16) | (offset << 8);
191 uint32_t ret_val = 0; 191 uint32_t ret_val = 0;
192 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 192 struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
193 pci_write_config_dword(pci_root, 0xD0, mcr); 193 pci_write_config_dword(pci_root, 0xD0, mcr);
194 pci_read_config_dword(pci_root, 0xD4, &ret_val); 194 pci_read_config_dword(pci_root, 0xD4, &ret_val);
195 pci_dev_put(pci_root); 195 pci_dev_put(pci_root);
196 return ret_val; 196 return ret_val;
197} 197}
198 198
199static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value) 199static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
200 u32 value)
200{ 201{
201 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0; 202 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
202 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 203 struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
203 pci_write_config_dword(pci_root, 0xD4, value); 204 pci_write_config_dword(pci_root, 0xD4, value);
204 pci_write_config_dword(pci_root, 0xD0, mcr); 205 pci_write_config_dword(pci_root, 0xD0, mcr);
205 pci_dev_put(pci_root); 206 pci_dev_put(pci_root);
@@ -216,11 +217,12 @@ static void cdv_init_pm(struct drm_device *dev)
216{ 217{
217 struct drm_psb_private *dev_priv = dev->dev_private; 218 struct drm_psb_private *dev_priv = dev->dev_private;
218 u32 pwr_cnt; 219 u32 pwr_cnt;
220 int domain = pci_domain_nr(dev->pdev->bus);
219 int i; 221 int i;
220 222
221 dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT, 223 dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
222 PSB_APMBA) & 0xFFFF; 224 PSB_APMBA) & 0xFFFF;
223 dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT, 225 dev_priv->ospm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
224 PSB_OSPMBA) & 0xFFFF; 226 PSB_OSPMBA) & 0xFFFF;
225 227
226 /* Power status */ 228 /* Power status */
@@ -251,7 +253,7 @@ static void cdv_errata(struct drm_device *dev)
251 * Bonus Launch to work around the issue, by degrading 253 * Bonus Launch to work around the issue, by degrading
252 * performance. 254 * performance.
253 */ 255 */
254 CDV_MSG_WRITE32(3, 0x30, 0x08027108); 256 CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
255} 257}
256 258
257/** 259/**
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 4a295f9ba067..a7fb6de4dd15 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -19,7 +19,9 @@
19void gma_get_core_freq(struct drm_device *dev) 19void gma_get_core_freq(struct drm_device *dev)
20{ 20{
21 uint32_t clock; 21 uint32_t clock;
22 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 22 struct pci_dev *pci_root =
23 pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
24 0, 0);
23 struct drm_psb_private *dev_priv = dev->dev_private; 25 struct drm_psb_private *dev_priv = dev->dev_private;
24 26
25 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/ 27 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 1fa163373a47..7171b7475f58 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -32,7 +32,9 @@
32static void mid_get_fuse_settings(struct drm_device *dev) 32static void mid_get_fuse_settings(struct drm_device *dev)
33{ 33{
34 struct drm_psb_private *dev_priv = dev->dev_private; 34 struct drm_psb_private *dev_priv = dev->dev_private;
35 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 35 struct pci_dev *pci_root =
36 pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
37 0, 0);
36 uint32_t fuse_value = 0; 38 uint32_t fuse_value = 0;
37 uint32_t fuse_value_tmp = 0; 39 uint32_t fuse_value_tmp = 0;
38 40
@@ -104,7 +106,9 @@ static void mid_get_fuse_settings(struct drm_device *dev)
104static void mid_get_pci_revID(struct drm_psb_private *dev_priv) 106static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
105{ 107{
106 uint32_t platform_rev_id = 0; 108 uint32_t platform_rev_id = 0;
107 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); 109 int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
110 struct pci_dev *pci_gfx_root =
111 pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
108 112
109 if (pci_gfx_root == NULL) { 113 if (pci_gfx_root == NULL) {
110 WARN_ON(1); 114 WARN_ON(1);
@@ -281,7 +285,9 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
281 u32 addr; 285 u32 addr;
282 u8 __iomem *vbt_virtual; 286 u8 __iomem *vbt_virtual;
283 struct mid_vbt_header vbt_header; 287 struct mid_vbt_header vbt_header;
284 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); 288 struct pci_dev *pci_gfx_root =
289 pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
290 0, PCI_DEVFN(2, 0));
285 int ret = -1; 291 int ret = -1;
286 292
287 /* Get the address of the platform config vbt */ 293 /* Get the address of the platform config vbt */
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 37a3be71acd9..99d6527923de 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -261,7 +261,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
261 goto out_err; 261 goto out_err;
262 262
263 if (IS_MRST(dev)) { 263 if (IS_MRST(dev)) {
264 dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0)); 264 int domain = pci_domain_nr(dev->pdev->bus);
265
266 dev_priv->aux_pdev =
267 pci_get_domain_bus_and_slot(domain, 0,
268 PCI_DEVFN(3, 0));
265 269
266 if (dev_priv->aux_pdev) { 270 if (dev_priv->aux_pdev) {
267 resource_start = pci_resource_start(dev_priv->aux_pdev, 271 resource_start = pci_resource_start(dev_priv->aux_pdev,
@@ -281,7 +285,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
281 } 285 }
282 dev_priv->gmbus_reg = dev_priv->aux_reg; 286 dev_priv->gmbus_reg = dev_priv->aux_reg;
283 287
284 dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0)); 288 dev_priv->lpc_pdev =
289 pci_get_domain_bus_and_slot(domain, 0,
290 PCI_DEVFN(31, 0));
285 if (dev_priv->lpc_pdev) { 291 if (dev_priv->lpc_pdev) {
286 pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA, 292 pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
287 &dev_priv->lpc_gpio_base); 293 &dev_priv->lpc_gpio_base);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 821497dbd3fc..d409e02bf540 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -781,38 +781,40 @@ extern const struct psb_ops cdv_chip_ops;
781extern int drm_idle_check_interval; 781extern int drm_idle_check_interval;
782 782
783/* Utilities */ 783/* Utilities */
784static inline u32 MRST_MSG_READ32(uint port, uint offset) 784static inline u32 MRST_MSG_READ32(int domain, uint port, uint offset)
785{ 785{
786 int mcr = (0xD0<<24) | (port << 16) | (offset << 8); 786 int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
787 uint32_t ret_val = 0; 787 uint32_t ret_val = 0;
788 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 788 struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
789 pci_write_config_dword(pci_root, 0xD0, mcr); 789 pci_write_config_dword(pci_root, 0xD0, mcr);
790 pci_read_config_dword(pci_root, 0xD4, &ret_val); 790 pci_read_config_dword(pci_root, 0xD4, &ret_val);
791 pci_dev_put(pci_root); 791 pci_dev_put(pci_root);
792 return ret_val; 792 return ret_val;
793} 793}
794static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value) 794static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
795 u32 value)
795{ 796{
796 int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0; 797 int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
797 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 798 struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
798 pci_write_config_dword(pci_root, 0xD4, value); 799 pci_write_config_dword(pci_root, 0xD4, value);
799 pci_write_config_dword(pci_root, 0xD0, mcr); 800 pci_write_config_dword(pci_root, 0xD0, mcr);
800 pci_dev_put(pci_root); 801 pci_dev_put(pci_root);
801} 802}
802static inline u32 MDFLD_MSG_READ32(uint port, uint offset) 803static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
803{ 804{
804 int mcr = (0x10<<24) | (port << 16) | (offset << 8); 805 int mcr = (0x10<<24) | (port << 16) | (offset << 8);
805 uint32_t ret_val = 0; 806 uint32_t ret_val = 0;
806 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 807 struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
807 pci_write_config_dword(pci_root, 0xD0, mcr); 808 pci_write_config_dword(pci_root, 0xD0, mcr);
808 pci_read_config_dword(pci_root, 0xD4, &ret_val); 809 pci_read_config_dword(pci_root, 0xD4, &ret_val);
809 pci_dev_put(pci_root); 810 pci_dev_put(pci_root);
810 return ret_val; 811 return ret_val;
811} 812}
812static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value) 813static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
814 u32 value)
813{ 815{
814 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0; 816 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
815 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 817 struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
816 pci_write_config_dword(pci_root, 0xD4, value); 818 pci_write_config_dword(pci_root, 0xD4, value);
817 pci_write_config_dword(pci_root, 0xD0, mcr); 819 pci_write_config_dword(pci_root, 0xD0, mcr);
818 pci_dev_put(pci_root); 820 pci_dev_put(pci_root);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 90075b676256..c79160c37f84 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -213,8 +213,10 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
213 if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || 213 if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
214 (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { 214 (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
215 uint32_t type; 215 uint32_t type;
216 int domain = pci_domain_nr(dev->pdev->bus);
216 217
217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); 218 pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
219 0x7c, &type);
218 220
219 sim_data.memory_type = (type >> 12) & 1; 221 sim_data.memory_type = (type >> 12) & 1;
220 sim_data.memory_width = 64; 222 sim_data.memory_width = 64;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index b98599002831..0c9bdf023f5b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -216,12 +216,15 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
216{ 216{
217 struct nvkm_pll_vals pllvals; 217 struct nvkm_pll_vals pllvals;
218 int ret; 218 int ret;
219 int domain;
220
221 domain = pci_domain_nr(dev->pdev->bus);
219 222
220 if (plltype == PLL_MEMORY && 223 if (plltype == PLL_MEMORY &&
221 (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) { 224 (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
222 uint32_t mpllP; 225 uint32_t mpllP;
223 226 pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 227 0x6c, &mpllP);
225 mpllP = (mpllP >> 8) & 0xf; 228 mpllP = (mpllP >> 8) & 0xf;
226 if (!mpllP) 229 if (!mpllP)
227 mpllP = 4; 230 mpllP = 4;
@@ -232,7 +235,8 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
232 (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) { 235 (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
233 uint32_t clock; 236 uint32_t clock;
234 237
235 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 238 pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
239 0x4c, &clock);
236 return clock / 1000; 240 return clock / 1000;
237 } 241 }
238 242
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8d4a5be3b913..33b6139c1bf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -524,7 +524,8 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm)
524 } 524 }
525 525
526 /* subfunction one is a hdmi audio device? */ 526 /* subfunction one is a hdmi audio device? */
527 drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number, 527 drm->hdmi_device = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
528 (unsigned int)pdev->bus->number,
528 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1)); 529 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
529 530
530 if (!drm->hdmi_device) { 531 if (!drm->hdmi_device) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index 4c07d10bb976..18241c6ba5fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -28,8 +28,16 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28{ 28{
29 struct pci_dev *bridge; 29 struct pci_dev *bridge;
30 u32 mem, mib; 30 u32 mem, mib;
31 int domain = 0;
32 struct pci_dev *pdev = NULL;
31 33
32 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); 34 if (dev_is_pci(fb->subdev.device->dev))
35 pdev = to_pci_dev(fb->subdev.device->dev);
36
37 if (pdev)
38 domain = pci_domain_nr(pdev->bus);
39
40 bridge = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 1));
33 if (!bridge) { 41 if (!bridge) {
34 nvkm_error(&fb->subdev, "no bridge device\n"); 42 nvkm_error(&fb->subdev, "no bridge device\n");
35 return -ENODEV; 43 return -ENODEV;
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index 8755df3330a0..3300dac56390 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -239,8 +239,9 @@ static u8 sl82c105_bridge_revision(struct pci_dev *dev)
239 /* 239 /*
240 * The bridge should be part of the same device, but function 0. 240 * The bridge should be part of the same device, but function 0.
241 */ 241 */
242 bridge = pci_get_bus_and_slot(dev->bus->number, 242 bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
243 PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); 243 dev->bus->number,
244 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
244 if (!bridge) 245 if (!bridge)
245 return -1; 246 return -1;
246 247
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b33291b..b3786474e84a 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -430,59 +430,16 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)
430 430
431static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev) 431static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
432{ 432{
433 struct pci_dev *bridge; 433 int rc = pci_enable_atomic_ops_to_root(pdev,
434 u32 ctl2, cap2; 434 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
435 u16 flags;
436 int rc;
437
438 bridge = pdev->bus->self;
439 if (!bridge)
440 goto disable;
441
442 /* Check atomic routing support all the way to root complex */
443 while (bridge->bus->parent) {
444 rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
445 if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
446 goto disable;
447
448 rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
449 if (rc)
450 goto disable;
451 435
452 rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2); 436 if (rc) {
453 if (rc) 437 dev->atomic_cap = IB_ATOMIC_NONE;
454 goto disable; 438 DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
455 439 } else {
456 if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) || 440 dev->atomic_cap = IB_ATOMIC_GLOB;
457 (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)) 441 DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
458 goto disable;
459 bridge = bridge->bus->parent->self;
460 } 442 }
461
462 rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
463 if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
464 goto disable;
465
466 rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
467 if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
468 goto disable;
469
470 /* Set atomic operations */
471 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
472 PCI_EXP_DEVCTL2_ATOMIC_REQ);
473 dev->atomic_cap = IB_ATOMIC_GLOB;
474
475 DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
476
477 return;
478
479disable:
480 pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
481 PCI_EXP_DEVCTL2_ATOMIC_REQ);
482 dev->atomic_cap = IB_ATOMIC_NONE;
483
484 DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
485
486} 443}
487 444
488static const struct qed_rdma_ops *qed_ops; 445static const struct qed_rdma_ops *qed_ops;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7d5eb004091d..821547b23c65 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -527,7 +527,8 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
527 struct iommu_dev_data *dev_data = NULL; 527 struct iommu_dev_data *dev_data = NULL;
528 struct pci_dev *pdev; 528 struct pci_dev *pdev;
529 529
530 pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff); 530 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
531 devid & 0xff);
531 if (pdev) 532 if (pdev)
532 dev_data = get_dev_data(&pdev->dev); 533 dev_data = get_dev_data(&pdev->dev);
533 534
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6fe2d0346073..4e4a615bf13f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1697,8 +1697,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1697 u32 range, misc, low, high; 1697 u32 range, misc, low, high;
1698 int ret; 1698 int ret;
1699 1699
1700 iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid), 1700 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1701 iommu->devid & 0xff); 1701 iommu->devid & 0xff);
1702 if (!iommu->dev) 1702 if (!iommu->dev)
1703 return -ENODEV; 1703 return -ENODEV;
1704 1704
@@ -1764,8 +1764,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1764 if (is_rd890_iommu(iommu->dev)) { 1764 if (is_rd890_iommu(iommu->dev)) {
1765 int i, j; 1765 int i, j;
1766 1766
1767 iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, 1767 iommu->root_pdev =
1768 PCI_DEVFN(0, 0)); 1768 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1769 PCI_DEVFN(0, 0));
1769 1770
1770 /* 1771 /*
1771 * Some rd890 systems may not be fully reconfigured by the 1772 * Some rd890 systems may not be fully reconfigured by the
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 7d94e1d39e5e..8696382be837 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -564,7 +564,8 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
564 finish = (iommu_fault->tag >> 9) & 1; 564 finish = (iommu_fault->tag >> 9) & 1;
565 565
566 devid = iommu_fault->device_id; 566 devid = iommu_fault->device_id;
567 pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff); 567 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
568 devid & 0xff);
568 if (!pdev) 569 if (!pdev)
569 return -ENODEV; 570 return -ENODEV;
570 dev_data = get_dev_data(&pdev->dev); 571 dev_data = get_dev_data(&pdev->dev);
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 7aafbb091b67..b0d4aab1a58c 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -289,14 +289,14 @@ static struct resource pic1_io_resource = {
289 .name = "pic1", 289 .name = "pic1",
290 .start = PIC_MASTER_CMD, 290 .start = PIC_MASTER_CMD,
291 .end = PIC_MASTER_IMR, 291 .end = PIC_MASTER_IMR,
292 .flags = IORESOURCE_BUSY 292 .flags = IORESOURCE_IO | IORESOURCE_BUSY
293}; 293};
294 294
295static struct resource pic2_io_resource = { 295static struct resource pic2_io_resource = {
296 .name = "pic2", 296 .name = "pic2",
297 .start = PIC_SLAVE_CMD, 297 .start = PIC_SLAVE_CMD,
298 .end = PIC_SLAVE_IMR, 298 .end = PIC_SLAVE_IMR,
299 .flags = IORESOURCE_BUSY 299 .flags = IORESOURCE_IO | IORESOURCE_BUSY
300}; 300};
301 301
302static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq, 302static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..3e8b3b68dcb4 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1799,7 +1799,7 @@ static int powerbook_sleep_grackle(void)
1799 struct adb_request req; 1799 struct adb_request req;
1800 struct pci_dev *grackle; 1800 struct pci_dev *grackle;
1801 1801
1802 grackle = pci_get_bus_and_slot(0, 0); 1802 grackle = pci_get_domain_bus_and_slot(0, 0, 0);
1803 if (!grackle) 1803 if (!grackle)
1804 return -ENODEV; 1804 return -ENODEV;
1805 1805
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index a142b9dc0feb..ea40a24947ba 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -102,7 +102,6 @@ struct ttusb {
102 unsigned int isoc_in_pipe; 102 unsigned int isoc_in_pipe;
103 103
104 void *iso_buffer; 104 void *iso_buffer;
105 dma_addr_t iso_dma_handle;
106 105
107 struct urb *iso_urb[ISO_BUF_COUNT]; 106 struct urb *iso_urb[ISO_BUF_COUNT];
108 107
@@ -792,26 +791,17 @@ static void ttusb_free_iso_urbs(struct ttusb *ttusb)
792 791
793 for (i = 0; i < ISO_BUF_COUNT; i++) 792 for (i = 0; i < ISO_BUF_COUNT; i++)
794 usb_free_urb(ttusb->iso_urb[i]); 793 usb_free_urb(ttusb->iso_urb[i]);
795 794 kfree(ttusb->iso_buffer);
796 pci_free_consistent(NULL,
797 ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF *
798 ISO_BUF_COUNT, ttusb->iso_buffer,
799 ttusb->iso_dma_handle);
800} 795}
801 796
802static int ttusb_alloc_iso_urbs(struct ttusb *ttusb) 797static int ttusb_alloc_iso_urbs(struct ttusb *ttusb)
803{ 798{
804 int i; 799 int i;
805 800
806 ttusb->iso_buffer = pci_zalloc_consistent(NULL, 801 ttusb->iso_buffer = kcalloc(FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
807 ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT, 802 ISO_FRAME_SIZE, GFP_KERNEL);
808 &ttusb->iso_dma_handle); 803 if (!ttusb->iso_buffer)
809
810 if (!ttusb->iso_buffer) {
811 dprintk("%s: pci_alloc_consistent - not enough memory\n",
812 __func__);
813 return -ENOMEM; 804 return -ENOMEM;
814 }
815 805
816 for (i = 0; i < ISO_BUF_COUNT; i++) { 806 for (i = 0; i < ISO_BUF_COUNT; i++) {
817 struct urb *urb; 807 struct urb *urb;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index cdefb5dfbbdc..4d5acdf578a6 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -127,7 +127,6 @@ struct ttusb_dec {
127 struct urb *irq_urb; 127 struct urb *irq_urb;
128 dma_addr_t irq_dma_handle; 128 dma_addr_t irq_dma_handle;
129 void *iso_buffer; 129 void *iso_buffer;
130 dma_addr_t iso_dma_handle;
131 struct urb *iso_urb[ISO_BUF_COUNT]; 130 struct urb *iso_urb[ISO_BUF_COUNT];
132 int iso_stream_count; 131 int iso_stream_count;
133 struct mutex iso_mutex; 132 struct mutex iso_mutex;
@@ -1185,11 +1184,7 @@ static void ttusb_dec_free_iso_urbs(struct ttusb_dec *dec)
1185 1184
1186 for (i = 0; i < ISO_BUF_COUNT; i++) 1185 for (i = 0; i < ISO_BUF_COUNT; i++)
1187 usb_free_urb(dec->iso_urb[i]); 1186 usb_free_urb(dec->iso_urb[i]);
1188 1187 kfree(dec->iso_buffer);
1189 pci_free_consistent(NULL,
1190 ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF *
1191 ISO_BUF_COUNT),
1192 dec->iso_buffer, dec->iso_dma_handle);
1193} 1188}
1194 1189
1195static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec) 1190static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
@@ -1198,15 +1193,10 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
1198 1193
1199 dprintk("%s\n", __func__); 1194 dprintk("%s\n", __func__);
1200 1195
1201 dec->iso_buffer = pci_zalloc_consistent(NULL, 1196 dec->iso_buffer = kcalloc(FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
1202 ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT), 1197 ISO_FRAME_SIZE, GFP_KERNEL);
1203 &dec->iso_dma_handle); 1198 if (!dec->iso_buffer)
1204
1205 if (!dec->iso_buffer) {
1206 dprintk("%s: pci_alloc_consistent - not enough memory\n",
1207 __func__);
1208 return -ENOMEM; 1199 return -ENOMEM;
1209 }
1210 1200
1211 for (i = 0; i < ISO_BUF_COUNT; i++) { 1201 for (i = 0; i < ISO_BUF_COUNT; i++) {
1212 struct urb *urb; 1202 struct urb *urb;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3591077a5f6b..ffa7959f6b31 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -812,7 +812,7 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
812 if (!vf) 812 if (!vf)
813 return false; 813 return false;
814 814
815 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 815 dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
816 if (dev) 816 if (dev)
817 return bnx2x_is_pcie_pending(dev); 817 return bnx2x_is_pcie_pending(dev);
818 return false; 818 return false;
@@ -1041,6 +1041,13 @@ void bnx2x_iov_init_dmae(struct bnx2x *bp)
1041 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1041 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1042} 1042}
1043 1043
1044static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
1045{
1046 struct pci_dev *dev = bp->pdev;
1047
1048 return pci_domain_nr(dev->bus);
1049}
1050
1044static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1051static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1045{ 1052{
1046 struct pci_dev *dev = bp->pdev; 1053 struct pci_dev *dev = bp->pdev;
@@ -1606,6 +1613,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
1606 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1613 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1607 1614
1608 /* fill in the BDF and bars */ 1615 /* fill in the BDF and bars */
1616 vf->domain = bnx2x_vf_domain(bp, vfid);
1609 vf->bus = bnx2x_vf_bus(bp, vfid); 1617 vf->bus = bnx2x_vf_bus(bp, vfid);
1610 vf->devfn = bnx2x_vf_devfn(bp, vfid); 1618 vf->devfn = bnx2x_vf_devfn(bp, vfid);
1611 bnx2x_vf_set_bars(bp, vf); 1619 bnx2x_vf_set_bars(bp, vf);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 53466f6cebab..eb814c65152f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -182,6 +182,7 @@ struct bnx2x_virtf {
182 u32 error; /* 0 means all's-well */ 182 u32 error; /* 0 means all's-well */
183 183
184 /* BDF */ 184 /* BDF */
185 unsigned int domain;
185 unsigned int bus; 186 unsigned int bus;
186 unsigned int devfn; 187 unsigned int devfn;
187 188
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 7d95f0866fb0..28a81ac97af5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -143,7 +143,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
143 struct hinic_hwif *hwif = cmdqs->hwif; 143 struct hinic_hwif *hwif = cmdqs->hwif;
144 struct pci_dev *pdev = hwif->pdev; 144 struct pci_dev *pdev = hwif->pdev;
145 145
146 cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL, 146 cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
147 &cmdq_buf->dma_addr); 147 &cmdq_buf->dma_addr);
148 if (!cmdq_buf->buf) { 148 if (!cmdq_buf->buf) {
149 dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n"); 149 dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
@@ -161,7 +161,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
161void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, 161void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
162 struct hinic_cmdq_buf *cmdq_buf) 162 struct hinic_cmdq_buf *cmdq_buf)
163{ 163{
164 pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); 164 dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
165} 165}
166 166
167static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) 167static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
@@ -875,7 +875,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
875 int err; 875 int err;
876 876
877 cmdqs->hwif = hwif; 877 cmdqs->hwif = hwif;
878 cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev, 878 cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
879 HINIC_CMDQ_BUF_SIZE, 879 HINIC_CMDQ_BUF_SIZE,
880 HINIC_CMDQ_BUF_SIZE, 0); 880 HINIC_CMDQ_BUF_SIZE, 0);
881 if (!cmdqs->cmdq_buf_pool) 881 if (!cmdqs->cmdq_buf_pool)
@@ -916,7 +916,7 @@ err_cmdq_wqs:
916 devm_kfree(&pdev->dev, cmdqs->saved_wqs); 916 devm_kfree(&pdev->dev, cmdqs->saved_wqs);
917 917
918err_saved_wqs: 918err_saved_wqs:
919 pci_pool_destroy(cmdqs->cmdq_buf_pool); 919 dma_pool_destroy(cmdqs->cmdq_buf_pool);
920 return err; 920 return err;
921} 921}
922 922
@@ -942,5 +942,5 @@ void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
942 942
943 devm_kfree(&pdev->dev, cmdqs->saved_wqs); 943 devm_kfree(&pdev->dev, cmdqs->saved_wqs);
944 944
945 pci_pool_destroy(cmdqs->cmdq_buf_pool); 945 dma_pool_destroy(cmdqs->cmdq_buf_pool);
946} 946}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index b35583400cb6..23f8d39eab68 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -157,7 +157,7 @@ struct hinic_cmdq {
157struct hinic_cmdqs { 157struct hinic_cmdqs {
158 struct hinic_hwif *hwif; 158 struct hinic_hwif *hwif;
159 159
160 struct pci_pool *cmdq_buf_pool; 160 struct dma_pool *cmdq_buf_pool;
161 161
162 struct hinic_wq *saved_wqs; 162 struct hinic_wq *saved_wqs;
163 163
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 44b3937f7e81..29486478836e 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -607,7 +607,7 @@ struct nic {
607 struct mem *mem; 607 struct mem *mem;
608 dma_addr_t dma_addr; 608 dma_addr_t dma_addr;
609 609
610 struct pci_pool *cbs_pool; 610 struct dma_pool *cbs_pool;
611 dma_addr_t cbs_dma_addr; 611 dma_addr_t cbs_dma_addr;
612 u8 adaptive_ifs; 612 u8 adaptive_ifs;
613 u8 tx_threshold; 613 u8 tx_threshold;
@@ -1892,7 +1892,7 @@ static void e100_clean_cbs(struct nic *nic)
1892 nic->cb_to_clean = nic->cb_to_clean->next; 1892 nic->cb_to_clean = nic->cb_to_clean->next;
1893 nic->cbs_avail++; 1893 nic->cbs_avail++;
1894 } 1894 }
1895 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); 1895 dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1896 nic->cbs = NULL; 1896 nic->cbs = NULL;
1897 nic->cbs_avail = 0; 1897 nic->cbs_avail = 0;
1898 } 1898 }
@@ -1910,7 +1910,7 @@ static int e100_alloc_cbs(struct nic *nic)
1910 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; 1910 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1911 nic->cbs_avail = 0; 1911 nic->cbs_avail = 0;
1912 1912
1913 nic->cbs = pci_pool_zalloc(nic->cbs_pool, GFP_KERNEL, 1913 nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
1914 &nic->cbs_dma_addr); 1914 &nic->cbs_dma_addr);
1915 if (!nic->cbs) 1915 if (!nic->cbs)
1916 return -ENOMEM; 1916 return -ENOMEM;
@@ -2960,8 +2960,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2960 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); 2960 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2961 goto err_out_free; 2961 goto err_out_free;
2962 } 2962 }
2963 nic->cbs_pool = pci_pool_create(netdev->name, 2963 nic->cbs_pool = dma_pool_create(netdev->name,
2964 nic->pdev, 2964 &nic->pdev->dev,
2965 nic->params.cbs.max * sizeof(struct cb), 2965 nic->params.cbs.max * sizeof(struct cb),
2966 sizeof(u32), 2966 sizeof(u32),
2967 0); 2967 0);
@@ -3001,7 +3001,7 @@ static void e100_remove(struct pci_dev *pdev)
3001 unregister_netdev(netdev); 3001 unregister_netdev(netdev);
3002 e100_free(nic); 3002 e100_free(nic);
3003 pci_iounmap(pdev, nic->csr); 3003 pci_iounmap(pdev, nic->csr);
3004 pci_pool_destroy(nic->cbs_pool); 3004 dma_pool_destroy(nic->cbs_pool);
3005 free_netdev(netdev); 3005 free_netdev(netdev);
3006 pci_release_regions(pdev); 3006 pci_release_regions(pdev);
3007 pci_disable_device(pdev); 3007 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 40e52ffb732f..7cd494611a74 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2594,8 +2594,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2594 if (adapter->pdata && adapter->pdata->platform_init) 2594 if (adapter->pdata && adapter->pdata->platform_init)
2595 adapter->pdata->platform_init(pdev); 2595 adapter->pdata->platform_init(pdev);
2596 2596
2597 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2597 adapter->ptp_pdev =
2598 PCI_DEVFN(12, 4)); 2598 pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
2599 adapter->pdev->bus->number,
2600 PCI_DEVFN(12, 4));
2599 2601
2600 netdev->netdev_ops = &pch_gbe_netdev_ops; 2602 netdev->netdev_ops = &pch_gbe_netdev_ops;
2601 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2603 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 0624b71ab5d4..edcd1e60b30d 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -152,6 +152,8 @@ struct tsi108_prv_data {
152 u32 msg_enable; /* debug message level */ 152 u32 msg_enable; /* debug message level */
153 struct mii_if_info mii_if; 153 struct mii_if_info mii_if;
154 unsigned int init_media; 154 unsigned int init_media;
155
156 struct platform_device *pdev;
155}; 157};
156 158
157/* Structure for a device driver */ 159/* Structure for a device driver */
@@ -703,17 +705,18 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
703 data->txskbs[tx] = skb; 705 data->txskbs[tx] = skb;
704 706
705 if (i == 0) { 707 if (i == 0) {
706 data->txring[tx].buf0 = dma_map_single(NULL, skb->data, 708 data->txring[tx].buf0 = dma_map_single(&data->pdev->dev,
707 skb_headlen(skb), DMA_TO_DEVICE); 709 skb->data, skb_headlen(skb),
710 DMA_TO_DEVICE);
708 data->txring[tx].len = skb_headlen(skb); 711 data->txring[tx].len = skb_headlen(skb);
709 misc |= TSI108_TX_SOF; 712 misc |= TSI108_TX_SOF;
710 } else { 713 } else {
711 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 714 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
712 715
713 data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag, 716 data->txring[tx].buf0 =
714 0, 717 skb_frag_dma_map(&data->pdev->dev, frag,
715 skb_frag_size(frag), 718 0, skb_frag_size(frag),
716 DMA_TO_DEVICE); 719 DMA_TO_DEVICE);
717 data->txring[tx].len = skb_frag_size(frag); 720 data->txring[tx].len = skb_frag_size(frag);
718 } 721 }
719 722
@@ -808,9 +811,9 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
808 if (!skb) 811 if (!skb)
809 break; 812 break;
810 813
811 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, 814 data->rxring[rx].buf0 = dma_map_single(&data->pdev->dev,
812 TSI108_RX_SKB_SIZE, 815 skb->data, TSI108_RX_SKB_SIZE,
813 DMA_FROM_DEVICE); 816 DMA_FROM_DEVICE);
814 817
815 /* Sometimes the hardware sets blen to zero after packet 818 /* Sometimes the hardware sets blen to zero after packet
816 * reception, even though the manual says that it's only ever 819 * reception, even though the manual says that it's only ever
@@ -1308,15 +1311,15 @@ static int tsi108_open(struct net_device *dev)
1308 data->id, dev->irq, dev->name); 1311 data->id, dev->irq, dev->name);
1309 } 1312 }
1310 1313
1311 data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma, 1314 data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
1312 GFP_KERNEL); 1315 &data->rxdma, GFP_KERNEL);
1313 if (!data->rxring) 1316 if (!data->rxring)
1314 return -ENOMEM; 1317 return -ENOMEM;
1315 1318
1316 data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma, 1319 data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
1317 GFP_KERNEL); 1320 &data->txdma, GFP_KERNEL);
1318 if (!data->txring) { 1321 if (!data->txring) {
1319 pci_free_consistent(NULL, rxring_size, data->rxring, 1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
1320 data->rxdma); 1323 data->rxdma);
1321 return -ENOMEM; 1324 return -ENOMEM;
1322 } 1325 }
@@ -1428,10 +1431,10 @@ static int tsi108_close(struct net_device *dev)
1428 dev_kfree_skb(skb); 1431 dev_kfree_skb(skb);
1429 } 1432 }
1430 1433
1431 dma_free_coherent(0, 1434 dma_free_coherent(&data->pdev->dev,
1432 TSI108_RXRING_LEN * sizeof(rx_desc), 1435 TSI108_RXRING_LEN * sizeof(rx_desc),
1433 data->rxring, data->rxdma); 1436 data->rxring, data->rxdma);
1434 dma_free_coherent(0, 1437 dma_free_coherent(&data->pdev->dev,
1435 TSI108_TXRING_LEN * sizeof(tx_desc), 1438 TSI108_TXRING_LEN * sizeof(tx_desc),
1436 data->txring, data->txdma); 1439 data->txring, data->txdma);
1437 1440
@@ -1576,6 +1579,7 @@ tsi108_init_one(struct platform_device *pdev)
1576 printk("tsi108_eth%d: probe...\n", pdev->id); 1579 printk("tsi108_eth%d: probe...\n", pdev->id);
1577 data = netdev_priv(dev); 1580 data = netdev_priv(dev);
1578 data->dev = dev; 1581 data->dev = dev;
1582 data->pdev = pdev;
1579 1583
1580 pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", 1584 pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
1581 pdev->id, einfo->regs, einfo->phyregs, 1585 pdev->id, einfo->regs, einfo->phyregs,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ad9a9578f9c4..ee244c2e8c2b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -62,10 +62,6 @@ config OF_DYNAMIC
62config OF_ADDRESS 62config OF_ADDRESS
63 def_bool y 63 def_bool y
64 depends on !SPARC && HAS_IOMEM 64 depends on !SPARC && HAS_IOMEM
65 select OF_ADDRESS_PCI if PCI
66
67config OF_ADDRESS_PCI
68 bool
69 65
70config OF_IRQ 66config OF_IRQ
71 def_bool y 67 def_bool y
@@ -82,18 +78,6 @@ config OF_MDIO
82 help 78 help
83 OpenFirmware MDIO bus (Ethernet PHY) accessors 79 OpenFirmware MDIO bus (Ethernet PHY) accessors
84 80
85config OF_PCI
86 def_tristate PCI
87 depends on PCI
88 help
89 OpenFirmware PCI bus accessors
90
91config OF_PCI_IRQ
92 def_tristate PCI
93 depends on OF_PCI && OF_IRQ
94 help
95 OpenFirmware PCI IRQ routing helpers
96
97config OF_RESERVED_MEM 81config OF_RESERVED_MEM
98 depends on OF_EARLY_FLATTREE 82 depends on OF_EARLY_FLATTREE
99 bool 83 bool
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 63a4be62ce19..663a4af0cccd 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -10,8 +10,6 @@ obj-$(CONFIG_OF_IRQ) += irq.o
10obj-$(CONFIG_OF_NET) += of_net.o 10obj-$(CONFIG_OF_NET) += of_net.o
11obj-$(CONFIG_OF_UNITTEST) += unittest.o 11obj-$(CONFIG_OF_UNITTEST) += unittest.o
12obj-$(CONFIG_OF_MDIO) += of_mdio.o 12obj-$(CONFIG_OF_MDIO) += of_mdio.o
13obj-$(CONFIG_OF_PCI) += of_pci.o
14obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
15obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o 13obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
16obj-$(CONFIG_OF_RESOLVE) += resolver.o 14obj-$(CONFIG_OF_RESOLVE) += resolver.o
17obj-$(CONFIG_OF_OVERLAY) += overlay.o 15obj-$(CONFIG_OF_OVERLAY) += overlay.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index fa6cabfc3cb9..a4984e7364b1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -96,7 +96,7 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
96 return IORESOURCE_MEM; 96 return IORESOURCE_MEM;
97} 97}
98 98
99#ifdef CONFIG_OF_ADDRESS_PCI 99#ifdef CONFIG_PCI
100/* 100/*
101 * PCI bus specific translator 101 * PCI bus specific translator
102 */ 102 */
@@ -171,9 +171,7 @@ static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
171{ 171{
172 return of_bus_default_translate(addr + 1, offset, na - 1); 172 return of_bus_default_translate(addr + 1, offset, na - 1);
173} 173}
174#endif /* CONFIG_OF_ADDRESS_PCI */
175 174
176#ifdef CONFIG_PCI
177const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, 175const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
178 unsigned int *flags) 176 unsigned int *flags)
179{ 177{
@@ -361,6 +359,7 @@ invalid_range:
361 res->end = (resource_size_t)OF_BAD_ADDR; 359 res->end = (resource_size_t)OF_BAD_ADDR;
362 return err; 360 return err;
363} 361}
362EXPORT_SYMBOL(of_pci_range_to_resource);
364#endif /* CONFIG_PCI */ 363#endif /* CONFIG_PCI */
365 364
366/* 365/*
@@ -426,7 +425,7 @@ static unsigned int of_bus_isa_get_flags(const __be32 *addr)
426 */ 425 */
427 426
428static struct of_bus of_busses[] = { 427static struct of_bus of_busses[] = {
429#ifdef CONFIG_OF_ADDRESS_PCI 428#ifdef CONFIG_PCI
430 /* PCI */ 429 /* PCI */
431 { 430 {
432 .name = "pci", 431 .name = "pci",
@@ -437,7 +436,7 @@ static struct of_bus of_busses[] = {
437 .translate = of_bus_pci_translate, 436 .translate = of_bus_pci_translate,
438 .get_flags = of_bus_pci_get_flags, 437 .get_flags = of_bus_pci_get_flags,
439 }, 438 },
440#endif /* CONFIG_OF_ADDRESS_PCI */ 439#endif /* CONFIG_PCI */
441 /* ISA */ 440 /* ISA */
442 { 441 {
443 .name = "isa", 442 .name = "isa",
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
deleted file mode 100644
index a7b1cb6c2f65..000000000000
--- a/drivers/of/of_pci.c
+++ /dev/null
@@ -1,384 +0,0 @@
1#define pr_fmt(fmt) "OF: PCI: " fmt
2
3#include <linux/kernel.h>
4#include <linux/export.h>
5#include <linux/of.h>
6#include <linux/of_address.h>
7#include <linux/of_device.h>
8#include <linux/of_pci.h>
9#include <linux/slab.h>
10
11static inline int __of_pci_pci_compare(struct device_node *node,
12 unsigned int data)
13{
14 int devfn;
15
16 devfn = of_pci_get_devfn(node);
17 if (devfn < 0)
18 return 0;
19
20 return devfn == data;
21}
22
23struct device_node *of_pci_find_child_device(struct device_node *parent,
24 unsigned int devfn)
25{
26 struct device_node *node, *node2;
27
28 for_each_child_of_node(parent, node) {
29 if (__of_pci_pci_compare(node, devfn))
30 return node;
31 /*
32 * Some OFs create a parent node "multifunc-device" as
33 * a fake root for all functions of a multi-function
34 * device we go down them as well.
35 */
36 if (!strcmp(node->name, "multifunc-device")) {
37 for_each_child_of_node(node, node2) {
38 if (__of_pci_pci_compare(node2, devfn)) {
39 of_node_put(node);
40 return node2;
41 }
42 }
43 }
44 }
45 return NULL;
46}
47EXPORT_SYMBOL_GPL(of_pci_find_child_device);
48
49/**
50 * of_pci_get_devfn() - Get device and function numbers for a device node
51 * @np: device node
52 *
53 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
54 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
55 * and function numbers respectively. On error a negative error code is
56 * returned.
57 */
58int of_pci_get_devfn(struct device_node *np)
59{
60 u32 reg[5];
61 int error;
62
63 error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
64 if (error)
65 return error;
66
67 return (reg[0] >> 8) & 0xff;
68}
69EXPORT_SYMBOL_GPL(of_pci_get_devfn);
70
71/**
72 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
73 * @node: device node
74 * @res: address to a struct resource to return the bus-range
75 *
76 * Returns 0 on success or a negative error-code on failure.
77 */
78int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
79{
80 u32 bus_range[2];
81 int error;
82
83 error = of_property_read_u32_array(node, "bus-range", bus_range,
84 ARRAY_SIZE(bus_range));
85 if (error)
86 return error;
87
88 res->name = node->name;
89 res->start = bus_range[0];
90 res->end = bus_range[1];
91 res->flags = IORESOURCE_BUS;
92
93 return 0;
94}
95EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
96
97/**
98 * This function will try to obtain the host bridge domain number by
99 * finding a property called "linux,pci-domain" of the given device node.
100 *
101 * @node: device tree node with the domain information
102 *
103 * Returns the associated domain number from DT in the range [0-0xffff], or
104 * a negative value if the required property is not found.
105 */
106int of_get_pci_domain_nr(struct device_node *node)
107{
108 u32 domain;
109 int error;
110
111 error = of_property_read_u32(node, "linux,pci-domain", &domain);
112 if (error)
113 return error;
114
115 return (u16)domain;
116}
117EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
118
119/**
120 * This function will try to find the limitation of link speed by finding
121 * a property called "max-link-speed" of the given device node.
122 *
123 * @node: device tree node with the max link speed information
124 *
125 * Returns the associated max link speed from DT, or a negative value if the
126 * required property is not found or is invalid.
127 */
128int of_pci_get_max_link_speed(struct device_node *node)
129{
130 u32 max_link_speed;
131
132 if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
133 max_link_speed > 4)
134 return -EINVAL;
135
136 return max_link_speed;
137}
138EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
139
140/**
141 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
142 * is present and valid
143 */
144void of_pci_check_probe_only(void)
145{
146 u32 val;
147 int ret;
148
149 ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
150 if (ret) {
151 if (ret == -ENODATA || ret == -EOVERFLOW)
152 pr_warn("linux,pci-probe-only without valid value, ignoring\n");
153 return;
154 }
155
156 if (val)
157 pci_add_flags(PCI_PROBE_ONLY);
158 else
159 pci_clear_flags(PCI_PROBE_ONLY);
160
161 pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
162}
163EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
164
165#if defined(CONFIG_OF_ADDRESS)
166/**
167 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
168 * @dev: device node of the host bridge having the range property
169 * @busno: bus number associated with the bridge root bus
170 * @bus_max: maximum number of buses for this bridge
171 * @resources: list where the range of resources will be added after DT parsing
172 * @io_base: pointer to a variable that will contain on return the physical
173 * address for the start of the I/O range. Can be NULL if the caller doesn't
174 * expect IO ranges to be present in the device tree.
175 *
176 * It is the caller's job to free the @resources list.
177 *
178 * This function will parse the "ranges" property of a PCI host bridge device
179 * node and setup the resource mapping based on its content. It is expected
180 * that the property conforms with the Power ePAPR document.
181 *
182 * It returns zero if the range parsing has been successful or a standard error
183 * value if it failed.
184 */
185int of_pci_get_host_bridge_resources(struct device_node *dev,
186 unsigned char busno, unsigned char bus_max,
187 struct list_head *resources, resource_size_t *io_base)
188{
189 struct resource_entry *window;
190 struct resource *res;
191 struct resource *bus_range;
192 struct of_pci_range range;
193 struct of_pci_range_parser parser;
194 char range_type[4];
195 int err;
196
/* OF_BAD_ADDR marks "no I/O range converted yet" for the caller. */
197 if (io_base)
198 *io_base = (resource_size_t)OF_BAD_ADDR;
199
200 bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
201 if (!bus_range)
202 return -ENOMEM;
203
204 pr_info("host bridge %pOF ranges:\n", dev);
205
/* Prefer the DT "bus-range"; fall back to the caller's busno..bus_max. */
206 err = of_pci_parse_bus_range(dev, bus_range);
207 if (err) {
208 bus_range->start = busno;
209 bus_range->end = bus_max;
210 bus_range->flags = IORESOURCE_BUS;
211 pr_info(" No bus range found for %pOF, using %pR\n",
212 dev, bus_range);
213 } else {
/* Clamp the DT-provided range so it spans at most bus_max buses. */
214 if (bus_range->end > bus_range->start + bus_max)
215 bus_range->end = bus_range->start + bus_max;
216 }
217 pci_add_resource(resources, bus_range);
218
219 /* Check for ranges property */
220 err = of_pci_range_parser_init(&parser, dev);
221 if (err)
222 goto parse_failed;
223
224 pr_debug("Parsing ranges property...\n");
225 for_each_of_pci_range(&parser, &range) {
226 /* Read next ranges element */
227 if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
228 snprintf(range_type, 4, " IO");
229 else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
230 snprintf(range_type, 4, "MEM");
231 else
232 snprintf(range_type, 4, "err");
233 pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
234 range.cpu_addr, range.cpu_addr + range.size - 1,
235 range.pci_addr);
236
237 /*
238 * If we failed translation or got a zero-sized region
239 * then skip this range
240 */
241 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
242 continue;
243
244 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
245 if (!res) {
246 err = -ENOMEM;
247 goto parse_failed;
248 }
249
250 err = of_pci_range_to_resource(&range, dev, res);
251 if (err) {
/* Untranslatable range: drop this entry but keep parsing the rest. */
252 kfree(res);
253 continue;
254 }
255
256 if (resource_type(res) == IORESOURCE_IO) {
257 if (!io_base) {
258 pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
259 dev);
260 err = -EINVAL;
261 goto conversion_failed;
262 }
/* Only one I/O window can be reported; a later one overwrites *io_base. */
263 if (*io_base != (resource_size_t)OF_BAD_ADDR)
264 pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
265 dev);
266 *io_base = range.cpu_addr;
267 }
268
269 pci_add_resource_offset(resources, res, res->start - range.pci_addr);
270 }
271
272 return 0;
273
/* Error unwind: free the resource objects queued on @resources, then the list. */
274conversion_failed:
275 kfree(res);
276parse_failed:
277 resource_list_for_each_entry(window, resources)
278 kfree(window->res);
279 pci_free_resource_list(resources);
280 return err;
281}
282EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
283#endif /* CONFIG_OF_ADDRESS */
284
285/**
286 * of_pci_map_rid - Translate a requester ID through a downstream mapping.
287 * @np: root complex device node.
288 * @rid: PCI requester ID to map.
289 * @map_name: property name of the map to use.
290 * @map_mask_name: optional property name of the mask to use.
291 * @target: optional pointer to a target device node.
292 * @id_out: optional pointer to receive the translated ID.
293 *
294 * Given a PCI requester ID, look up the appropriate implementation-defined
295 * platform ID and/or the target device which receives transactions on that
296 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
297 * @id_out may be NULL if only the other is required. If @target points to
298 * a non-NULL device node pointer, only entries targeting that node will be
299 * matched; if it points to a NULL value, it will receive the device node of
300 * the first matching target phandle, with a reference held.
301 *
302 * Return: 0 on success or a standard error code on failure.
303 */
304int of_pci_map_rid(struct device_node *np, u32 rid,
305 const char *map_name, const char *map_mask_name,
306 struct device_node **target, u32 *id_out)
307{
308 u32 map_mask, masked_rid;
309 int map_len;
310 const __be32 *map = NULL;
311
/* At least one of @target / @id_out must be requested by the caller. */
312 if (!np || !map_name || (!target && !id_out))
313 return -EINVAL;
314
315 map = of_get_property(np, map_name, &map_len);
316 if (!map) {
317 if (target)
318 return -ENODEV;
319 /* Otherwise, no map implies no translation */
320 *id_out = rid;
321 return 0;
322 }
323
/* Each map entry is four u32 cells: (rid-base, phandle, out-base, length). */
324 if (!map_len || map_len % (4 * sizeof(*map))) {
325 pr_err("%pOF: Error: Bad %s length: %d\n", np,
326 map_name, map_len);
327 return -EINVAL;
328 }
329
330 /* The default is to select all bits. */
331 map_mask = 0xffffffff;
332
333 /*
334 * Can be overridden by "{iommu,msi}-map-mask" property.
335 * If of_property_read_u32() fails, the default is used.
336 */
337 if (map_mask_name)
338 of_property_read_u32(np, map_mask_name, &map_mask);
339
340 masked_rid = map_mask & rid;
341 for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
342 struct device_node *phandle_node;
343 u32 rid_base = be32_to_cpup(map + 0);
344 u32 phandle = be32_to_cpup(map + 1);
345 u32 out_base = be32_to_cpup(map + 2);
346 u32 rid_len = be32_to_cpup(map + 3);
347
/* An rid-base with bits outside the mask can never match: reject the map. */
348 if (rid_base & ~map_mask) {
349 pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
350 np, map_name, map_name,
351 map_mask, rid_base);
352 return -EFAULT;
353 }
354
355 if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
356 continue;
357
358 phandle_node = of_find_node_by_phandle(phandle);
359 if (!phandle_node)
360 return -ENODEV;
361
362 if (target) {
/*
 * If the caller pre-set *target, only entries aimed at that node
 * match; otherwise the first match is stored with its reference
 * kept, and the extra reference from any later lookup is dropped.
 */
363 if (*target)
364 of_node_put(phandle_node);
365 else
366 *target = phandle_node;
367
368 if (*target != phandle_node)
369 continue;
370 }
371
372 if (id_out)
373 *id_out = masked_rid - rid_base + out_base;
374
375 pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
376 np, map_name, map_mask, rid_base, out_base,
377 rid_len, rid, masked_rid - rid_base + out_base);
378 return 0;
379 }
380
381 pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
382 np, map_name, rid, target && *target ? *target : NULL);
383 return -EFAULT;
384}
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
deleted file mode 100644
index 3a05568f65df..000000000000
--- a/drivers/of/of_pci_irq.c
+++ /dev/null
@@ -1,131 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/of_pci.h>
3#include <linux/of_irq.h>
4#include <linux/export.h>
5
6/**
7 * of_irq_parse_pci - Resolve the interrupt for a PCI device
8 * @pdev: the device whose interrupt is to be resolved
9 * @out_irq: structure of_irq filled by this function
10 *
11 * This function resolves the PCI interrupt for a given PCI device. If a
12 * device-node exists for a given pci_dev, it will use normal OF tree
13 * walking. If not, it will implement standard swizzling and walk up the
14 * PCI tree until an device-node is found, at which point it will finish
15 * resolving using the OF tree walking.
16 */
17int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
18{
19 struct device_node *dn, *ppnode;
20 struct pci_dev *ppdev;
21 __be32 laddr[3];
22 u8 pin;
23 int rc;
24
25 /* Check if we have a device node, if yes, fallback to standard
26 * device tree parsing
27 */
28 dn = pci_device_to_OF_node(pdev);
29 if (dn) {
30 rc = of_irq_parse_one(dn, 0, out_irq);
31 if (!rc)
32 return rc;
33 }
34
35 /* Ok, we don't, time to have fun. Let's start by building up an
36 * interrupt spec. we assume #interrupt-cells is 1, which is standard
37 * for PCI. If you do different, then don't use that routine.
38 */
39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
40 if (rc != 0)
41 goto err;
42 /* No pin, exit with no error message. */
43 if (pin == 0)
44 return -ENODEV;
45
46 /* Now we walk up the PCI tree */
47 for (;;) {
48 /* Get the pci_dev of our parent */
49 ppdev = pdev->bus->self;
50
51 /* Ouch, it's a host bridge... */
52 if (ppdev == NULL) {
53 ppnode = pci_bus_to_OF_node(pdev->bus);
54
55 /* No node for host bridge ? give up */
56 if (ppnode == NULL) {
57 rc = -EINVAL;
58 goto err;
59 }
60 } else {
61 /* We found a P2P bridge, check if it has a node */
62 ppnode = pci_device_to_OF_node(ppdev);
63 }
64
65 /* Ok, we have found a parent with a device-node, hand over to
66 * the OF parsing code.
67 * We build a unit address from the linux device to be used for
68 * resolution. Note that we use the linux bus number which may
69 * not match your firmware bus numbering.
70 * Fortunately, in most cases, interrupt-map-mask doesn't
71 * include the bus number as part of the matching.
72 * You should still be careful about that though if you intend
73 * to rely on this function (you ship a firmware that doesn't
74 * create device nodes for all PCI devices).
75 */
76 if (ppnode)
77 break;
78
79 /* We can only get here if we hit a P2P bridge with no node,
80 * let's do standard swizzling and try again
81 */
82 pin = pci_swizzle_interrupt_pin(pdev, pin);
83 pdev = ppdev;
84 }
85
/* Interrupt specifier: one cell carrying the (possibly swizzled) pin. */
86 out_irq->np = ppnode;
87 out_irq->args_count = 1;
88 out_irq->args[0] = pin;
/* Encode bus number and devfn into the first PCI address cell; rest are 0. */
89 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
90 laddr[1] = laddr[2] = cpu_to_be32(0);
91 rc = of_irq_parse_raw(laddr, out_irq);
92 if (rc)
93 goto err;
94 return 0;
95err:
/* -ENOENT means no interrupt-map: warn but treat it as a soft failure. */
96 if (rc == -ENOENT) {
97 dev_warn(&pdev->dev,
98 "%s: no interrupt-map found, INTx interrupts not available\n",
99 __func__);
100 pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
101 __func__);
102 } else {
103 dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
104 }
105 return rc;
106}
107EXPORT_SYMBOL_GPL(of_irq_parse_pci);
108
109/**
110 * of_irq_parse_and_map_pci() - Decode a PCI irq from the device tree and map to a virq
111 * @dev: The pci device needing an irq
112 * @slot: PCI slot number; passed when used as map_irq callback. Unused
113 * @pin: PCI irq pin number; passed when used as map_irq callback. Unused
114 *
115 * @slot and @pin are unused, but included in the function so that this
116 * function can be used directly as the map_irq callback to
117 * pci_assign_irq() and struct pci_host_bridge.map_irq pointer
118 */
119int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
120{
121 struct of_phandle_args oirq;
122 int ret;
123
124 ret = of_irq_parse_pci(dev, &oirq);
125 if (ret)
126 return 0; /* Proper return code 0 == NO_IRQ */
127
/* Map the parsed interrupt specifier onto a Linux virq number. */
128 return irq_create_of_mapping(&oirq);
129}
130EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
131
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 949aa74206cd..34b56a8f8480 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -126,6 +126,7 @@ config PCI_PASID
126 126
127config PCI_LABEL 127config PCI_LABEL
128 def_bool y if (DMI || ACPI) 128 def_bool y if (DMI || ACPI)
129 depends on PCI
129 select NLS 130 select NLS
130 131
131config PCI_HYPERV 132config PCI_HYPERV
@@ -136,6 +137,7 @@ config PCI_HYPERV
136 PCI devices from a PCI backend to support PCI driver domains. 137 PCI devices from a PCI backend to support PCI driver domains.
137 138
138source "drivers/pci/hotplug/Kconfig" 139source "drivers/pci/hotplug/Kconfig"
140source "drivers/pci/cadence/Kconfig"
139source "drivers/pci/dwc/Kconfig" 141source "drivers/pci/dwc/Kconfig"
140source "drivers/pci/host/Kconfig" 142source "drivers/pci/host/Kconfig"
141source "drivers/pci/endpoint/Kconfig" 143source "drivers/pci/endpoint/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index c7819b973df7..941970936840 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -3,12 +3,15 @@
3# Makefile for the PCI bus specific drivers. 3# Makefile for the PCI bus specific drivers.
4# 4#
5 5
6obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ 6obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o remove.o pci.o \
7 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ 7 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
8 irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o 8 irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o
9 9
10ifdef CONFIG_PCI
10obj-$(CONFIG_PROC_FS) += proc.o 11obj-$(CONFIG_PROC_FS) += proc.o
11obj-$(CONFIG_SYSFS) += slot.o 12obj-$(CONFIG_SYSFS) += slot.o
13obj-$(CONFIG_OF) += of.o
14endif
12 15
13obj-$(CONFIG_PCI_QUIRKS) += quirks.o 16obj-$(CONFIG_PCI_QUIRKS) += quirks.o
14 17
@@ -44,10 +47,15 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o
44 47
45obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o 48obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
46 49
47obj-$(CONFIG_OF) += of.o
48
49ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG 50ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
50 51
51# PCI host controller drivers 52# PCI host controller drivers
52obj-y += host/ 53obj-y += host/
53obj-y += switch/ 54obj-y += switch/
55
56obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
57
58# Endpoint library must be initialized before its users
59obj-$(CONFIG_PCIE_CADENCE) += cadence/
60# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
61obj-y += dwc/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 5a64da3fb033..5e9a9822d9d4 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -334,8 +334,7 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
334 (tag == PCI_VPD_LTIN_RW_DATA)) { 334 (tag == PCI_VPD_LTIN_RW_DATA)) {
335 if (pci_read_vpd(dev, off+1, 2, 335 if (pci_read_vpd(dev, off+1, 2,
336 &header[1]) != 2) { 336 &header[1]) != 2) {
337 dev_warn(&dev->dev, 337 pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
338 "invalid large VPD tag %02x size at offset %zu",
339 tag, off + 1); 338 tag, off + 1);
340 return 0; 339 return 0;
341 } 340 }
@@ -355,8 +354,7 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
355 if ((tag != PCI_VPD_LTIN_ID_STRING) && 354 if ((tag != PCI_VPD_LTIN_ID_STRING) &&
356 (tag != PCI_VPD_LTIN_RO_DATA) && 355 (tag != PCI_VPD_LTIN_RO_DATA) &&
357 (tag != PCI_VPD_LTIN_RW_DATA)) { 356 (tag != PCI_VPD_LTIN_RW_DATA)) {
358 dev_warn(&dev->dev, 357 pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
359 "invalid %s VPD tag %02x at offset %zu",
360 (header[0] & PCI_VPD_LRDT) ? "large" : "short", 358 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
361 tag, off); 359 tag, off);
362 return 0; 360 return 0;
@@ -403,7 +401,7 @@ static int pci_vpd_wait(struct pci_dev *dev)
403 max_sleep *= 2; 401 max_sleep *= 2;
404 } 402 }
405 403
406 dev_warn(&dev->dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n"); 404 pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
407 return -ETIMEDOUT; 405 return -ETIMEDOUT;
408} 406}
409 407
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 30a4d33038bf..737d1c52f002 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -290,7 +290,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
290 res->end = end; 290 res->end = end;
291 res->flags &= ~IORESOURCE_UNSET; 291 res->flags &= ~IORESOURCE_UNSET;
292 orig_res.flags &= ~IORESOURCE_UNSET; 292 orig_res.flags &= ~IORESOURCE_UNSET;
293 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", 293 pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n",
294 &orig_res, res); 294 &orig_res, res);
295 295
296 return true; 296 return true;
@@ -326,7 +326,7 @@ void pci_bus_add_device(struct pci_dev *dev)
326 dev->match_driver = true; 326 dev->match_driver = true;
327 retval = device_attach(&dev->dev); 327 retval = device_attach(&dev->dev);
328 if (retval < 0 && retval != -EPROBE_DEFER) { 328 if (retval < 0 && retval != -EPROBE_DEFER) {
329 dev_warn(&dev->dev, "device attach failed (%d)\n", retval); 329 pci_warn(dev, "device attach failed (%d)\n", retval);
330 pci_proc_detach_device(dev); 330 pci_proc_detach_device(dev);
331 pci_remove_sysfs_dev_files(dev); 331 pci_remove_sysfs_dev_files(dev);
332 return; 332 return;
diff --git a/drivers/pci/cadence/Kconfig b/drivers/pci/cadence/Kconfig
new file mode 100644
index 000000000000..e6824cb56c16
--- /dev/null
+++ b/drivers/pci/cadence/Kconfig
@@ -0,0 +1,27 @@
1menu "Cadence PCIe controllers support"
2
3config PCIE_CADENCE
4 bool
5
6config PCIE_CADENCE_HOST
7 bool "Cadence PCIe host controller"
8 depends on OF
9 depends on PCI
10 select IRQ_DOMAIN
11 select PCIE_CADENCE
12 help
13 Say Y here if you want to support the Cadence PCIe controller in host
14 mode. This PCIe controller may be embedded into many different vendors
15 SoCs.
16
17config PCIE_CADENCE_EP
18 bool "Cadence PCIe endpoint controller"
19 depends on OF
20 depends on PCI_ENDPOINT
21 select PCIE_CADENCE
22 help
23 Say Y here if you want to support the Cadence PCIe controller in
24 endpoint mode. This PCIe controller may be embedded into many
25 different vendors SoCs.
26
27endmenu
diff --git a/drivers/pci/cadence/Makefile b/drivers/pci/cadence/Makefile
new file mode 100644
index 000000000000..719392b97998
--- /dev/null
+++ b/drivers/pci/cadence/Makefile
@@ -0,0 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
3obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
4obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
diff --git a/drivers/pci/cadence/pcie-cadence-ep.c b/drivers/pci/cadence/pcie-cadence-ep.c
new file mode 100644
index 000000000000..3c3a97743453
--- /dev/null
+++ b/drivers/pci/cadence/pcie-cadence-ep.c
@@ -0,0 +1,542 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2017 Cadence
3// Cadence PCIe endpoint controller driver.
4// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5
6#include <linux/delay.h>
7#include <linux/kernel.h>
8#include <linux/of.h>
9#include <linux/pci-epc.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#include <linux/sizes.h>
13
14#include "pcie-cadence.h"
15
16#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
17#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
18#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
19
20/**
21 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
22 * @pcie: Cadence PCIe controller
23 * @max_regions: maximum number of regions supported by hardware
24 * @ob_region_map: bitmask of mapped outbound regions
25 * @ob_addr: base addresses in the AXI bus where the outbound regions start
26 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
27 * dedicated outbound regions is mapped.
28 * @irq_cpu_addr: base address in the CPU space where a write access triggers
29 * the sending of a memory write (MSI) / normal message (legacy
30 * IRQ) TLP through the PCIe bus.
31 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
32 * dedicated outbound region.
33 * @irq_pci_fn: the latest PCI function that has updated the mapping of
34 * the MSI/legacy IRQ dedicated outbound region.
35 * @irq_pending: bitmask of asserted legacy IRQs.
 * (bits 0-3 track INTA-INTD, as set by cdns_pcie_ep_assert_intx())
36 */
37struct cdns_pcie_ep {
38 struct cdns_pcie pcie;
39 u32 max_regions;
40 unsigned long ob_region_map;
41 phys_addr_t *ob_addr;
42 phys_addr_t irq_phys_addr;
43 void __iomem *irq_cpu_addr;
44 u64 irq_pci_addr;
45 u8 irq_pci_fn;
46 u8 irq_pending;
47};
48
49static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
50 struct pci_epf_header *hdr)
51{
52 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
53 struct cdns_pcie *pcie = &ep->pcie;
54
55 cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
56 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
57 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
58 cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
59 hdr->subclass_code | hdr->baseclass_code << 8);
60 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
61 hdr->cache_line_size);
62 cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
63 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
64
65 /*
66 * Vendor ID can only be modified from function 0, all other functions
67 * use the same vendor ID as function 0.
68 */
69 if (fn == 0) {
70 /* Update the vendor IDs. */
71 u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
72 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
73
74 cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
75 }
76
77 return 0;
78}
79
80static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
81 dma_addr_t bar_phys, size_t size, int flags)
82{
83 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
84 struct cdns_pcie *pcie = &ep->pcie;
85 u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
86 u64 sz;
87
88 /* BAR size is 2^(aperture + 7) */
89 sz = max_t(size_t, size, CDNS_PCIE_EP_MIN_APERTURE);
90 /*
91 * roundup_pow_of_two() returns an unsigned long, which is not suited
92 * for 64bit values.
93 */
94 sz = 1ULL << fls64(sz - 1);
95 aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
96
97 if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
98 ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
99 } else {
100 bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
/* A BAR wider than 2GB needs a 64-bit BAR pair. */
101 bool is_64bits = sz > SZ_2G;
102
/* A 64-bit BAR consumes two slots and must start on an even BAR index. */
103 if (is_64bits && (bar & 1))
104 return -EINVAL;
105
106 if (is_64bits && is_prefetch)
107 ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
108 else if (is_prefetch)
109 ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
110 else if (is_64bits)
111 ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
112 else
113 ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
114 }
115
/* Program the inbound address translation target for this BAR. */
116 addr0 = lower_32_bits(bar_phys);
117 addr1 = upper_32_bits(bar_phys);
118 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
119 addr0);
120 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
121 addr1);
122
/* BARs 0-3 are configured in CFG0, BARs 4-5 in CFG1. */
123 if (bar < BAR_4) {
124 reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
125 b = bar;
126 } else {
127 reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
128 b = bar - BAR_4;
129 }
130
/* Read-modify-write: only this BAR's aperture/control fields change. */
131 cfg = cdns_pcie_readl(pcie, reg);
132 cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
133 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
134 cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
135 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
136 cdns_pcie_writel(pcie, reg, cfg);
137
138 return 0;
139}
140
141static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
142 enum pci_barno bar)
143{
144 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
145 struct cdns_pcie *pcie = &ep->pcie;
146 u32 reg, cfg, b, ctrl;
147
148 if (bar < BAR_4) {
149 reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
150 b = bar;
151 } else {
152 reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
153 b = bar - BAR_4;
154 }
155
156 ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
157 cfg = cdns_pcie_readl(pcie, reg);
158 cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
159 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
160 cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
161 cdns_pcie_writel(pcie, reg, cfg);
162
163 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
164 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
165}
166
167static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
168 u64 pci_addr, size_t size)
169{
170 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
171 struct cdns_pcie *pcie = &ep->pcie;
172 u32 r;
173
174 r = find_first_zero_bit(&ep->ob_region_map,
175 sizeof(ep->ob_region_map) * BITS_PER_LONG);
176 if (r >= ep->max_regions - 1) {
177 dev_err(&epc->dev, "no free outbound region\n");
178 return -EINVAL;
179 }
180
181 cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);
182
183 set_bit(r, &ep->ob_region_map);
184 ep->ob_addr[r] = addr;
185
186 return 0;
187}
188
189static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
190 phys_addr_t addr)
191{
192 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
193 struct cdns_pcie *pcie = &ep->pcie;
194 u32 r;
195
196 for (r = 0; r < ep->max_regions - 1; r++)
197 if (ep->ob_addr[r] == addr)
198 break;
199
200 if (r == ep->max_regions - 1)
201 return;
202
203 cdns_pcie_reset_outbound_region(pcie, r);
204
205 ep->ob_addr[r] = 0;
206 clear_bit(r, &ep->ob_region_map);
207}
208
209static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
210{
211 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
212 struct cdns_pcie *pcie = &ep->pcie;
213 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
214 u16 flags;
215
216 /*
217 * Set the Multiple Message Capable bitfield into the Message Control
218 * register.
219 */
220 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
/* The MMC value is shifted into the QMASK field, which starts at bit 1. */
221 flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
/* Advertise 64-bit message addresses; per-vector masking is not offered. */
222 flags |= PCI_MSI_FLAGS_64BIT;
223 flags &= ~PCI_MSI_FLAGS_MASKBIT;
224 cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
225
226 return 0;
227}
228
229static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
230{
231 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
232 struct cdns_pcie *pcie = &ep->pcie;
233 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
234 u16 flags, mmc, mme;
235
236 /* Validate that the MSI feature is actually enabled. */
237 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
238 if (!(flags & PCI_MSI_FLAGS_ENABLE))
239 return -EINVAL;
240
241 /*
242 * Get the Multiple Message Enable bitfield from the Message Control
243 * register.
244 */
245 mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
246 mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
247
248 return mme;
249}
250
251static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
252 u8 intx, bool is_asserted)
253{
254 struct cdns_pcie *pcie = &ep->pcie;
255 u32 r = ep->max_regions - 1;
256 u32 offset;
257 u16 status;
258 u8 msg_code;
259
/* Only four INTx lines exist (INTA-INTD), so keep the two low bits. */
260 intx &= 3;
261
262 /* Set the outbound region if needed. */
263 if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
264 ep->irq_pci_fn != fn)) {
265 /* Last region was reserved for IRQ writes. */
266 cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
267 ep->irq_phys_addr);
268 ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
269 ep->irq_pci_fn = fn;
270 }
271
/* Track the per-line pending state and pick the matching message code. */
272 if (is_asserted) {
273 ep->irq_pending |= BIT(intx);
274 msg_code = MSG_CODE_ASSERT_INTA + intx;
275 } else {
276 ep->irq_pending &= ~BIT(intx);
277 msg_code = MSG_CODE_DEASSERT_INTA + intx;
278 }
279
/* Keep the PCI_STATUS interrupt bit in sync with the pending mask. */
280 status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
281 if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
282 status ^= PCI_STATUS_INTERRUPT;
283 cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
284 }
285
/* This write emits the Assert/Deassert_INTx message on the link. */
286 offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
287 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
288 CDNS_PCIE_MSG_NO_DATA;
289 writel(0, ep->irq_cpu_addr + offset);
290}
291
292static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
293{
294 u16 cmd;
295
296 cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
297 if (cmd & PCI_COMMAND_INTX_DISABLE)
298 return -EINVAL;
299
300 cdns_pcie_ep_assert_intx(ep, fn, intx, true);
301 /*
302 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
303 * from drivers/pci/dwc/pci-dra7xx.c
304 */
305 mdelay(1);
306 cdns_pcie_ep_assert_intx(ep, fn, intx, false);
307 return 0;
308}
309
310static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
311 u8 interrupt_num)
312{
313 struct cdns_pcie *pcie = &ep->pcie;
314 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
315 u16 flags, mme, data, data_mask;
316 u8 msi_count;
317 u64 pci_addr, pci_addr_mask = 0xff;
318
319 /* Check whether the MSI feature has been enabled by the PCI host. */
320 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
321 if (!(flags & PCI_MSI_FLAGS_ENABLE))
322 return -EINVAL;
323
324 /* Get the number of enabled MSIs */
325 mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
326 msi_count = 1 << mme;
/* interrupt_num is 1-based; it must fit in the enabled vector count. */
327 if (!interrupt_num || interrupt_num > msi_count)
328 return -EINVAL;
329
330 /* Compute the data value to be written. */
/* The low log2(msi_count) bits of the MSI data select the vector. */
331 data_mask = msi_count - 1;
332 data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
333 data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
334
335 /* Get the PCI address where to write the data into. */
336 pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
337 pci_addr <<= 32;
338 pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
/* MSI addresses are DWORD aligned: drop the two low bits. */
339 pci_addr &= GENMASK_ULL(63, 2);
340
341 /* Set the outbound region if needed. */
342 if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
343 ep->irq_pci_fn != fn)) {
344 /* Last region was reserved for IRQ writes. */
345 cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
346 false,
347 ep->irq_phys_addr,
348 pci_addr & ~pci_addr_mask,
349 pci_addr_mask + 1);
350 ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
351 ep->irq_pci_fn = fn;
352 }
/* This write produces the Memory Write TLP that delivers the MSI. */
353 writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
354
355 return 0;
356}
357
358static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
359 enum pci_epc_irq_type type, u8 interrupt_num)
360{
361 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
362
363 switch (type) {
364 case PCI_EPC_IRQ_LEGACY:
365 return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
366
367 case PCI_EPC_IRQ_MSI:
368 return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
369
370 default:
371 break;
372 }
373
374 return -EINVAL;
375}
376
377static int cdns_pcie_ep_start(struct pci_epc *epc)
378{
379 struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
380 struct cdns_pcie *pcie = &ep->pcie;
381 struct pci_epf *epf;
382 u32 cfg;
383
384 /*
385 * BIT(0) is hardwired to 1, hence function 0 is always enabled
386 * and can't be disabled anyway.
387 */
/* Enable exactly the physical functions that have an EPF bound to them. */
388 cfg = BIT(0);
389 list_for_each_entry(epf, &epc->pci_epf, list)
390 cfg |= BIT(epf->func_no);
391 cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
392
393 /*
394 * The PCIe links are automatically established by the controller
395 * once for all at powerup: the software can neither start nor stop
396 * those links later at runtime.
397 *
398 * Then we only have to notify the EP core that our links are already
399 * established. However we don't call directly pci_epc_linkup() because
400 * we've already locked the epc->lock.
401 */
402 list_for_each_entry(epf, &epc->pci_epf, list)
403 pci_epf_linkup(epf);
404
405 return 0;
406}
407
/* EPC operations implemented by this Cadence endpoint controller driver. */
408static const struct pci_epc_ops cdns_pcie_epc_ops = {
409 .write_header = cdns_pcie_ep_write_header,
410 .set_bar = cdns_pcie_ep_set_bar,
411 .clear_bar = cdns_pcie_ep_clear_bar,
412 .map_addr = cdns_pcie_ep_map_addr,
413 .unmap_addr = cdns_pcie_ep_unmap_addr,
414 .set_msi = cdns_pcie_ep_set_msi,
415 .get_msi = cdns_pcie_ep_get_msi,
416 .raise_irq = cdns_pcie_ep_raise_irq,
417 .start = cdns_pcie_ep_start,
418};
419
/* Device tree compatibles this endpoint driver binds to. */
420static const struct of_device_id cdns_pcie_ep_of_match[] = {
421 { .compatible = "cdns,cdns-pcie-ep" },
422
423 { /* sentinel */ },
424};
425
426static int cdns_pcie_ep_probe(struct platform_device *pdev)
427{
428 struct device *dev = &pdev->dev;
429 struct device_node *np = dev->of_node;
430 struct cdns_pcie_ep *ep;
431 struct cdns_pcie *pcie;
432 struct pci_epc *epc;
433 struct resource *res;
434 int ret;
435
436 ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
437 if (!ep)
438 return -ENOMEM;
439
440 pcie = &ep->pcie;
441 pcie->is_rc = false;
442
443 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
444 pcie->reg_base = devm_ioremap_resource(dev, res);
445 if (IS_ERR(pcie->reg_base)) {
446 dev_err(dev, "missing \"reg\"\n");
447 return PTR_ERR(pcie->reg_base);
448 }
449
450 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
451 if (!res) {
452 dev_err(dev, "missing \"mem\"\n");
453 return -EINVAL;
454 }
455 pcie->mem_res = res;
456
457 ret = of_property_read_u32(np, "cdns,max-outbound-regions",
458 &ep->max_regions);
459 if (ret < 0) {
460 dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
461 return ret;
462 }
463 ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr),
464 GFP_KERNEL);
465 if (!ep->ob_addr)
466 return -ENOMEM;
467
468 pm_runtime_enable(dev);
469 ret = pm_runtime_get_sync(dev);
470 if (ret < 0) {
471 dev_err(dev, "pm_runtime_get_sync() failed\n");
472 goto err_get_sync;
473 }
474
475 /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
476 cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
477
478 epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
479 if (IS_ERR(epc)) {
480 dev_err(dev, "failed to create epc device\n");
481 ret = PTR_ERR(epc);
482 goto err_init;
483 }
484
485 epc_set_drvdata(epc, ep);
486
487 if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
488 epc->max_functions = 1;
489
490 ret = pci_epc_mem_init(epc, pcie->mem_res->start,
491 resource_size(pcie->mem_res));
492 if (ret < 0) {
493 dev_err(dev, "failed to initialize the memory space\n");
494 goto err_init;
495 }
496
497 ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
498 SZ_128K);
499 if (!ep->irq_cpu_addr) {
500 dev_err(dev, "failed to reserve memory space for MSI\n");
501 ret = -ENOMEM;
502 goto free_epc_mem;
503 }
504 ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
505
506 return 0;
507
508 free_epc_mem:
509 pci_epc_mem_exit(epc);
510
511 err_init:
512 pm_runtime_put_sync(dev);
513
514 err_get_sync:
515 pm_runtime_disable(dev);
516
517 return ret;
518}
519
520static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
521{
522 struct device *dev = &pdev->dev;
523 int ret;
524
525 ret = pm_runtime_put_sync(dev);
526 if (ret < 0)
527 dev_dbg(dev, "pm_runtime_put_sync failed\n");
528
529 pm_runtime_disable(dev);
530
531 /* The PCIe controller can't be disabled. */
532}
533
/* Built-in platform driver, matched against the DT compatible table above. */
534static struct platform_driver cdns_pcie_ep_driver = {
535 .driver = {
536 .name = "cdns-pcie-ep",
537 .of_match_table = cdns_pcie_ep_of_match,
538 },
539 .probe = cdns_pcie_ep_probe,
540 .shutdown = cdns_pcie_ep_shutdown,
541};
542builtin_platform_driver(cdns_pcie_ep_driver);
diff --git a/drivers/pci/cadence/pcie-cadence-host.c b/drivers/pci/cadence/pcie-cadence-host.c
new file mode 100644
index 000000000000..a4ebbd37b553
--- /dev/null
+++ b/drivers/pci/cadence/pcie-cadence-host.c
@@ -0,0 +1,336 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2017 Cadence
3// Cadence PCIe host controller driver.
4// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5
6#include <linux/kernel.h>
7#include <linux/of_address.h>
8#include <linux/of_pci.h>
9#include <linux/platform_device.h>
10#include <linux/pm_runtime.h>
11
12#include "pcie-cadence.h"
13
/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @dev: pointer to PCIe device
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @bus_range: first/last buses behind the PCIe host controller
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @max_regions: maximum number of regions supported by the hardware
 * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
 *                translation (nbits sets into the "no BAR match" register)
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 */
struct cdns_pcie_rc {
	struct cdns_pcie pcie;
	struct device *dev;
	struct resource *cfg_res;
	struct resource *bus_range;
	void __iomem *cfg_base;
	u32 max_regions;
	u32 no_bar_nbits;
	u16 vendor_id;
	u16 device_id;
};
40
/*
 * pci_ops .map_bus hook: return the CPU virtual address through which the
 * generic config accessors can reach (bus, devfn, where).
 *
 * Accesses targeting the root bus go directly to the controller's local
 * root port config registers.  Accesses to any other bus reprogram
 * outbound AXI region 0 on the fly so the "cfg" window issues a Type 0 or
 * Type 1 configuration transaction to the requested bus/devfn.
 */
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (busn == rc->bus_range->start) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == rc->bus_range->start + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}
83
/* Generic config accessors; cdns_pci_map_bus() provides the window. */
static struct pci_ops cdns_pcie_host_ops = {
	.map_bus = cdns_pci_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
89
/* Device tree compatibles handled by this Root Complex driver. */
static const struct of_device_id cdns_pcie_host_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-host" },

	{ },
};
95
/*
 * Program the root port: BAR configuration, optional vendor/device IDs
 * from DT, and the PCI-to-PCI bridge class code.  Always returns 0.
 */
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/*
	 * Set root port configuration space.  An ID of 0xffff means the DT
	 * property was absent (probe's default), so the hardware value is
	 * left untouched.
	 */
	if (rc->vendor_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	/* Advertise as a standard PCI-to-PCI bridge, revision 0. */
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}
130
/*
 * Program the outbound (CPU -> PCIe) and inbound (PCIe -> CPU) address
 * translation registers:
 *  - region 0 is reserved for config accesses (cdns_pci_map_bus()),
 *  - regions 1..max_regions-1 are filled from the DT "ranges" property,
 *  - the Root Port "no BAR match" inbound translation is set up for MSI
 *    and DMA.
 * Returns 0 on success or a negative errno if "ranges" cannot be parsed.
 */
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct resource *cfg_res = rc->cfg_res;
	struct resource *mem_res = pcie->mem_res;
	struct resource *bus_range = rc->bus_range;
	struct device *dev = rc->dev;
	struct device_node *np = dev->of_node;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 addr0, addr1, desc1;
	u64 cpu_addr;
	int r, err;

	/*
	 * Reserve region 0 for PCI configure space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	/* CPU address of the "cfg" window, relative to the "mem" region. */
	cpu_addr = cfg_res->start - mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	err = of_pci_range_parser_init(&parser, np);
	if (err)
		return err;

	/* One outbound region per MEM/IO entry of the DT "ranges". */
	r = 1;
	for_each_of_pci_range(&parser, &range) {
		bool is_io;

		/* Silently drop entries beyond the hardware capacity. */
		if (r >= rc->max_regions)
			break;

		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			is_io = false;
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			is_io = true;
		else
			continue;

		cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
					      range.cpu_addr,
					      range.pci_addr,
					      range.size);
		r++;
	}

	/*
	 * Set Root Port no BAR match Inbound Translation registers:
	 * needed for MSI and DMA.
	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
	 * inbound translation registers.
	 */
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
	addr1 = 0;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);

	return 0;
}
200
201static int cdns_pcie_host_init(struct device *dev,
202 struct list_head *resources,
203 struct cdns_pcie_rc *rc)
204{
205 struct resource *bus_range = NULL;
206 int err;
207
208 /* Parse our PCI ranges and request their resources */
209 err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
210 if (err)
211 return err;
212
213 rc->bus_range = bus_range;
214 rc->pcie.bus = bus_range->start;
215
216 err = cdns_pcie_host_init_root_port(rc);
217 if (err)
218 goto err_out;
219
220 err = cdns_pcie_host_init_address_translation(rc);
221 if (err)
222 goto err_out;
223
224 return 0;
225
226 err_out:
227 pci_free_resource_list(resources);
228 return err;
229}
230
/*
 * Probe: map the controller register windows, read the optional DT tuning
 * properties, power the controller up through runtime PM and register a
 * PCI host bridge.  Returns 0 on success or a negative errno.
 */
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
	const char *type;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct list_head resources;
	struct cdns_pcie_rc *rc;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;

	/* The driver private data is allocated within the host bridge. */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
	if (!bridge)
		return -ENOMEM;

	rc = pci_host_bridge_priv(bridge);
	rc->dev = dev;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	/* Optional DT properties: keep the defaults when absent. */
	rc->max_regions = 32;
	of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);

	rc->no_bar_nbits = 32;
	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);

	/* 0xffff means "not set"; see cdns_pcie_host_init_root_port(). */
	rc->vendor_id = 0xffff;
	of_property_read_u16(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u16(np, "device-id", &rc->device_id);

	/* Host controller nodes must carry device_type = "pci". */
	type = of_get_property(np, "device_type", NULL);
	if (!type || strcmp(type, "pci")) {
		dev_err(dev, "invalid \"device_type\" %s\n", type);
		return -EINVAL;
	}

	/* "reg": local management and root port registers. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	/* "cfg": window used by cdns_pci_map_bus() for config accesses. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base)) {
		dev_err(dev, "missing \"cfg\"\n");
		return PTR_ERR(rc->cfg_base);
	}
	rc->cfg_res = res;

	/* "mem": CPU region the outbound translation maps from (not mapped). */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	/* Claim resources and program the controller registers. */
	ret = cdns_pcie_host_init(dev, &resources, rc);
	if (ret)
		goto err_init;

	list_splice_init(&resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->busnr = pcie->bus;
	bridge->ops = &cdns_pcie_host_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_host_probe;

	return 0;

 err_host_probe:
	pci_free_resource_list(&resources);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);

	return ret;
}
328
/* Host-mode platform driver; built-in only, so no .remove callback. */
static struct platform_driver cdns_pcie_host_driver = {
	.driver = {
		.name = "cdns-pcie-host",
		.of_match_table = cdns_pcie_host_of_match,
	},
	.probe = cdns_pcie_host_probe,
};
builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/cadence/pcie-cadence.c b/drivers/pci/cadence/pcie-cadence.c
new file mode 100644
index 000000000000..138d113eb45d
--- /dev/null
+++ b/drivers/pci/cadence/pcie-cadence.c
@@ -0,0 +1,126 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2017 Cadence
3// Cadence PCIe controller driver.
4// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5
6#include <linux/kernel.h>
7
8#include "pcie-cadence.h"
9
/**
 * cdns_pcie_set_outbound_region() - program outbound AXI region @r so that
 * CPU accesses at @cpu_addr generate PCIe MEM or IO transactions to
 * @pci_addr
 * @pcie: the Cadence PCIe controller
 * @fn: PCI function number (only meaningful in Endpoint mode)
 * @r: index of the outbound region to program
 * @is_io: true for an IO region, false for a MEM region
 * @cpu_addr: CPU (AXI) base address of the region
 * @pci_addr: PCI base address the region translates to
 * @size: size of the region; rounded up to a power of two, minimum 256
 */
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	/* The translation registers cannot encode fewer than 8 bits. */
	if (nbits < 8)
		nbits = 8;

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whatever Bit [23] is set or not inside DESC0 register of the outbound
	 * PCIe descriptor, the PCI function number must be set into
	 * Bits [26:24] of DESC0 anyway.
	 *
	 * In Root Complex mode, the function number is always 0 but in Endpoint
	 * mode, the PCIe controller may support more than one function. This
	 * function number needs to be set properly into the outbound PCIe
	 * descriptor.
	 *
	 * Besides, setting Bit [23] is mandatory when in Root Complex mode:
	 * then the driver must provide the bus, resp. device, number in
	 * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function
	 * number, the device number is always 0 in Root Complex mode.
	 *
	 * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence
	 * the PCIe controller will use the captured values for the bus and
	 * device numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
	} else {
		/*
		 * Use captured values for bus and device numbers but still
		 * need to set the function number.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address, relative to the start of the "mem" region. */
	cpu_addr -= pcie->mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
84
/**
 * cdns_pcie_set_outbound_region_for_normal_msg() - program outbound AXI
 * region @r to emit PCIe Normal Messages instead of MEM/IO transactions
 * @pcie: the Cadence PCIe controller
 * @fn: PCI function number (only meaningful in Endpoint mode)
 * @r: index of the outbound region to program
 * @cpu_addr: CPU (AXI) base address of the region (fixed 2^17-byte window)
 */
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address, relative to the start of the "mem" region. */
	cpu_addr -= pcie->mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	/* Messages carry no PCI address: clear the PCI address registers. */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
115
116void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
117{
118 cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
119 cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
120
121 cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
122 cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
123
124 cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
125 cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
126}
diff --git a/drivers/pci/cadence/pcie-cadence.h b/drivers/pci/cadence/pcie-cadence.h
new file mode 100644
index 000000000000..4bb27333b05c
--- /dev/null
+++ b/drivers/pci/cadence/pcie-cadence.h
@@ -0,0 +1,311 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2017 Cadence
3// Cadence PCIe controller driver.
4// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5
6#ifndef _PCIE_CADENCE_H
7#define _PCIE_CADENCE_H
8
9#include <linux/kernel.h>
10#include <linux/pci.h>
11
12/*
13 * Local Management Registers
14 */
15#define CDNS_PCIE_LM_BASE 0x00100000
16
17/* Vendor ID Register */
18#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
19#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
20#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
21#define CDNS_PCIE_LM_ID_VENDOR(vid) \
22 (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
23#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
24#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
25#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
26 (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
27
28/* Root Port Requestor ID Register */
29#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
30#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
31#define CDNS_PCIE_LM_RP_RID_SHIFT 0
32#define CDNS_PCIE_LM_RP_RID_(rid) \
33 (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
34
35/* Endpoint Bus and Device Number Register */
36#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
37#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
38#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
39#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
40#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
41
42/* Endpoint Function f BAR b Configuration Registers */
43#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
44 (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
45#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
46 (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
47#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
48 (GENMASK(4, 0) << ((b) * 8))
49#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
50 (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
51#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
52 (GENMASK(7, 5) << ((b) * 8))
53#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
54 (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
55
56/* Endpoint Function Configuration Register */
57#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
58
59/* Root Complex BAR Configuration Register */
60#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
61#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
62#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
63 (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
64#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
65#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
66 (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
67#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
68#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
69 (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
70#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
71#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
72 (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
73#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
74#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
75#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
76#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
77#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
78#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
79#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
80
81/* BAR control values applicable to both Endpoint Function and Root Complex */
82#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
83#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
84#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
85#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
86#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
87#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
88
89
90/*
91 * Endpoint Function Registers (PCI configuration space for endpoint functions)
92 */
93#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
94
95#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
96
97/*
98 * Root Port Registers (PCI configuration space for the root port function)
99 */
100#define CDNS_PCIE_RP_BASE 0x00200000
101
102
103/*
104 * Address Translation Registers
105 */
106#define CDNS_PCIE_AT_BASE 0x00400000
107
108/* Region r Outbound AXI to PCIe Address Translation Register 0 */
109#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
110 (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
111#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
112#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
113 (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
114#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
115#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
116 (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
117#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
118#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
119 (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
120
121/* Region r Outbound AXI to PCIe Address Translation Register 1 */
122#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
123 (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
124
125/* Region r Outbound PCIe Descriptor Register 0 */
126#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
127 (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
128#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
129#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
130#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
131#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
132#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
133#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
134#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
135/* Bit 23 MUST be set in RC mode. */
136#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
137#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
138#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
139 (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
140
141/* Region r Outbound PCIe Descriptor Register 1 */
142#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
143 (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
144#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
145#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
146 ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
147
148/* Region r AXI Region Base Address Register 0 */
149#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
150 (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
151#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
152#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
153 (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
154
155/* Region r AXI Region Base Address Register 1 */
156#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
157 (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
158
159/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
160#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
161 (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
162#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
163#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
164 (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
165#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
166 (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
167
/* Root Port BAR selectors for the inbound translation registers. */
enum cdns_pcie_rp_bar {
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR	/* "no BAR match" entry, used for MSI and DMA */
};
173
174/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
175#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
176 (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
177#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
178 (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
179
180/* Normal/Vendor specific message access: offset inside some outbound region */
181#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
182#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
183 (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
184#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
185#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
186 (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
187#define CDNS_PCIE_MSG_NO_DATA BIT(16)
188
/* INTx message codes usable with CDNS_PCIE_NORMAL_MSG_CODE(). */
enum cdns_pcie_msg_code {
	MSG_CODE_ASSERT_INTA	= 0x20,
	MSG_CODE_ASSERT_INTB	= 0x21,
	MSG_CODE_ASSERT_INTC	= 0x22,
	MSG_CODE_ASSERT_INTD	= 0x23,
	MSG_CODE_DEASSERT_INTA	= 0x24,
	MSG_CODE_DEASSERT_INTB	= 0x25,
	MSG_CODE_DEASSERT_INTC	= 0x26,
	MSG_CODE_DEASSERT_INTD	= 0x27,
};
199
/* Message routing values usable with CDNS_PCIE_NORMAL_MSG_ROUTING(). */
enum cdns_pcie_msg_routing {
	/* Route to Root Complex */
	MSG_ROUTING_TO_RC,

	/* Use Address Routing */
	MSG_ROUTING_BY_ADDR,

	/* Use ID Routing */
	MSG_ROUTING_BY_ID,

	/* Route as Broadcast Message from Root Complex */
	MSG_ROUTING_BCAST,

	/* Local message; terminate at receiver (INTx messages) */
	MSG_ROUTING_LOCAL,

	/* Gather & route to Root Complex (PME_TO_Ack message) */
	MSG_ROUTING_GATHER,
};
219
/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @bus: In Root Complex mode, the bus number
 */
struct cdns_pcie {
	void __iomem *reg_base;
	struct resource *mem_res;
	bool is_rc;
	u8 bus;
};
233
/* Register access: plain MMIO accessors relative to the controller base. */
static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
{
	writeb(value, pcie->reg_base + reg);
}

static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
{
	writew(value, pcie->reg_base + reg);
}

static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}
254
/* Root Port register access: offsets relative to CDNS_PCIE_RP_BASE. */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
}
267
/* Endpoint Function register access: offsets relative to function fn. */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
280
281static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
282 u32 reg, u16 value)
283{
284 writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
285}
286
/* Endpoint Function config space readers (byte/word/dword). */
static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
301
302void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
303 u32 r, bool is_io,
304 u64 cpu_addr, u64 pci_addr, size_t size);
305
306void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
307 u32 r, u64 cpu_addr);
308
309void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
310
311#endif /* _PCIE_CADENCE_H */
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index fe1cb56b71f1..0f666b1ce289 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -17,39 +17,38 @@ config PCIE_DW_EP
17 select PCIE_DW 17 select PCIE_DW
18 18
19config PCI_DRA7XX 19config PCI_DRA7XX
20 bool "TI DRA7xx PCIe controller" 20 bool
21 depends on SOC_DRA7XX || COMPILE_TEST
22 depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
23 depends on OF && HAS_IOMEM && TI_PIPE3
24 help
25 Enables support for the PCIe controller in the DRA7xx SoC. There
26 are two instances of PCIe controller in DRA7xx. This controller can
27 work either as EP or RC. In order to enable host-specific features
28 PCI_DRA7XX_HOST must be selected and in order to enable device-
29 specific features PCI_DRA7XX_EP must be selected. This uses
30 the DesignWare core.
31
32if PCI_DRA7XX
33 21
34config PCI_DRA7XX_HOST 22config PCI_DRA7XX_HOST
35 bool "PCI DRA7xx Host Mode" 23 bool "TI DRA7xx PCIe controller Host Mode"
36 depends on PCI 24 depends on SOC_DRA7XX || COMPILE_TEST
37 depends on PCI_MSI_IRQ_DOMAIN 25 depends on PCI && PCI_MSI_IRQ_DOMAIN
26 depends on OF && HAS_IOMEM && TI_PIPE3
38 select PCIE_DW_HOST 27 select PCIE_DW_HOST
28 select PCI_DRA7XX
39 default y 29 default y
40 help 30 help
41 Enables support for the PCIe controller in the DRA7xx SoC to work in 31 Enables support for the PCIe controller in the DRA7xx SoC to work in
42 host mode. 32 host mode. There are two instances of PCIe controller in DRA7xx.
33 This controller can work either as EP or RC. In order to enable
34 host-specific features PCI_DRA7XX_HOST must be selected and in order
35 to enable device-specific features PCI_DRA7XX_EP must be selected.
36 This uses the DesignWare core.
43 37
44config PCI_DRA7XX_EP 38config PCI_DRA7XX_EP
45 bool "PCI DRA7xx Endpoint Mode" 39 bool "TI DRA7xx PCIe controller Endpoint Mode"
40 depends on SOC_DRA7XX || COMPILE_TEST
46 depends on PCI_ENDPOINT 41 depends on PCI_ENDPOINT
42 depends on OF && HAS_IOMEM && TI_PIPE3
47 select PCIE_DW_EP 43 select PCIE_DW_EP
44 select PCI_DRA7XX
48 help 45 help
49 Enables support for the PCIe controller in the DRA7xx SoC to work in 46 Enables support for the PCIe controller in the DRA7xx SoC to work in
50 endpoint mode. 47 endpoint mode. There are two instances of PCIe controller in DRA7xx.
51 48 This controller can work either as EP or RC. In order to enable
52endif 49 host-specific features PCI_DRA7XX_HOST must be selected and in order
50 to enable device-specific features PCI_DRA7XX_EP must be selected.
51 This uses the DesignWare core.
53 52
54config PCIE_DW_PLAT 53config PCIE_DW_PLAT
55 bool "Platform bus based DesignWare PCIe Controller" 54 bool "Platform bus based DesignWare PCIe Controller"
@@ -151,15 +150,28 @@ config PCIE_ARMADA_8K
151 DesignWare core functions to implement the driver. 150 DesignWare core functions to implement the driver.
152 151
153config PCIE_ARTPEC6 152config PCIE_ARTPEC6
154 bool "Axis ARTPEC-6 PCIe controller" 153 bool
155 depends on PCI 154
155config PCIE_ARTPEC6_HOST
156 bool "Axis ARTPEC-6 PCIe controller Host Mode"
156 depends on MACH_ARTPEC6 157 depends on MACH_ARTPEC6
157 depends on PCI_MSI_IRQ_DOMAIN 158 depends on PCI && PCI_MSI_IRQ_DOMAIN
158 select PCIEPORTBUS 159 select PCIEPORTBUS
159 select PCIE_DW_HOST 160 select PCIE_DW_HOST
161 select PCIE_ARTPEC6
162 help
163 Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
164 host mode. This uses the DesignWare core.
165
166config PCIE_ARTPEC6_EP
167 bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
168 depends on MACH_ARTPEC6
169 depends on PCI_ENDPOINT
170 select PCIE_DW_EP
171 select PCIE_ARTPEC6
160 help 172 help
161 Say Y here to enable PCIe controller support on Axis ARTPEC-6 173 Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
162 SoCs. This PCIe controller uses the DesignWare core. 174 endpoint mode. This uses the DesignWare core.
163 175
164config PCIE_KIRIN 176config PCIE_KIRIN
165 depends on OF && ARM64 177 depends on OF && ARM64
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index 41ba499c96ee..5d2ce72c7a52 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -3,9 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
3obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o 3obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
4obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o 4obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
5obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o 5obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
6ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),) 6obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
7 obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
8endif
9obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o 7obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
10obj-$(CONFIG_PCI_IMX6) += pci-imx6.o 8obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
11obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o 9obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
@@ -27,4 +25,6 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
27# ARM64 and use internal ifdefs to only build the pieces we need 25# ARM64 and use internal ifdefs to only build the pieces we need
28# depending on whether ACPI, the DT driver, or both are enabled. 26# depending on whether ACPI, the DT driver, or both are enabled.
29 27
28ifdef CONFIG_PCI
30obj-$(CONFIG_ARM64) += pcie-hisi.o 29obj-$(CONFIG_ARM64) += pcie-hisi.o
30endif
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 47855f7c5a6f..ed8558d638e5 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -107,7 +107,7 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
107 writel(value, pcie->base + offset); 107 writel(value, pcie->base + offset);
108} 108}
109 109
110static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr) 110static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
111{ 111{
112 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; 112 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
113} 113}
@@ -223,6 +223,7 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
223 223
224static const struct irq_domain_ops intx_domain_ops = { 224static const struct irq_domain_ops intx_domain_ops = {
225 .map = dra7xx_pcie_intx_map, 225 .map = dra7xx_pcie_intx_map,
226 .xlate = pci_irqd_intx_xlate,
226}; 227};
227 228
228static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) 229static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
@@ -253,7 +254,8 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
253 struct dra7xx_pcie *dra7xx = arg; 254 struct dra7xx_pcie *dra7xx = arg;
254 struct dw_pcie *pci = dra7xx->pci; 255 struct dw_pcie *pci = dra7xx->pci;
255 struct pcie_port *pp = &pci->pp; 256 struct pcie_port *pp = &pci->pp;
256 u32 reg; 257 unsigned long reg;
258 u32 virq, bit;
257 259
258 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); 260 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
259 261
@@ -265,8 +267,11 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
265 case INTB: 267 case INTB:
266 case INTC: 268 case INTC:
267 case INTD: 269 case INTD:
268 generic_handle_irq(irq_find_mapping(dra7xx->irq_domain, 270 for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
269 ffs(reg))); 271 virq = irq_find_mapping(dra7xx->irq_domain, bit);
272 if (virq)
273 generic_handle_irq(virq);
274 }
270 break; 275 break;
271 } 276 }
272 277
@@ -334,15 +339,6 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
334 return IRQ_HANDLED; 339 return IRQ_HANDLED;
335} 340}
336 341
337static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
338{
339 u32 reg;
340
341 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
342 dw_pcie_writel_dbi2(pci, reg, 0x0);
343 dw_pcie_writel_dbi(pci, reg, 0x0);
344}
345
346static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) 342static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
347{ 343{
348 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 344 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -372,7 +368,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
372 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); 368 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
373} 369}
374 370
375static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, 371static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
376 enum pci_epc_irq_type type, u8 interrupt_num) 372 enum pci_epc_irq_type type, u8 interrupt_num)
377{ 373{
378 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 374 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -467,6 +463,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
467 if (!pci->dbi_base) 463 if (!pci->dbi_base)
468 return -ENOMEM; 464 return -ENOMEM;
469 465
466 pp->ops = &dra7xx_pcie_host_ops;
467
470 ret = dw_pcie_host_init(pp); 468 ret = dw_pcie_host_init(pp);
471 if (ret) { 469 if (ret) {
472 dev_err(dev, "failed to initialize host\n"); 470 dev_err(dev, "failed to initialize host\n");
@@ -596,7 +594,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
596 void __iomem *base; 594 void __iomem *base;
597 struct resource *res; 595 struct resource *res;
598 struct dw_pcie *pci; 596 struct dw_pcie *pci;
599 struct pcie_port *pp;
600 struct dra7xx_pcie *dra7xx; 597 struct dra7xx_pcie *dra7xx;
601 struct device *dev = &pdev->dev; 598 struct device *dev = &pdev->dev;
602 struct device_node *np = dev->of_node; 599 struct device_node *np = dev->of_node;
@@ -624,9 +621,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
624 pci->dev = dev; 621 pci->dev = dev;
625 pci->ops = &dw_pcie_ops; 622 pci->ops = &dw_pcie_ops;
626 623
627 pp = &pci->pp;
628 pp->ops = &dra7xx_pcie_host_ops;
629
630 irq = platform_get_irq(pdev, 0); 624 irq = platform_get_irq(pdev, 0);
631 if (irq < 0) { 625 if (irq < 0) {
632 dev_err(dev, "missing IRQ resource: %d\n", irq); 626 dev_err(dev, "missing IRQ resource: %d\n", irq);
@@ -702,6 +696,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
702 696
703 switch (mode) { 697 switch (mode) {
704 case DW_PCIE_RC_TYPE: 698 case DW_PCIE_RC_TYPE:
699 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
700 ret = -ENODEV;
701 goto err_gpio;
702 }
703
705 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 704 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
706 DEVICE_TYPE_RC); 705 DEVICE_TYPE_RC);
707 ret = dra7xx_add_pcie_port(dra7xx, pdev); 706 ret = dra7xx_add_pcie_port(dra7xx, pdev);
@@ -709,6 +708,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
709 goto err_gpio; 708 goto err_gpio;
710 break; 709 break;
711 case DW_PCIE_EP_TYPE: 710 case DW_PCIE_EP_TYPE:
711 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
712 ret = -ENODEV;
713 goto err_gpio;
714 }
715
712 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 716 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
713 DEVICE_TYPE_EP); 717 DEVICE_TYPE_EP);
714 718
@@ -807,7 +811,7 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
807} 811}
808#endif 812#endif
809 813
810void dra7xx_pcie_shutdown(struct platform_device *pdev) 814static void dra7xx_pcie_shutdown(struct platform_device *pdev)
811{ 815{
812 struct device *dev = &pdev->dev; 816 struct device *dev = &pdev->dev;
813 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 817 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 328cc9f53865..ca6278113936 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -52,49 +52,8 @@
52#define PCIE_ELBI_SLV_ARMISC 0x120 52#define PCIE_ELBI_SLV_ARMISC 0x120
53#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) 53#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
54 54
55/* PCIe Purple registers */
56#define PCIE_PHY_GLOBAL_RESET 0x000
57#define PCIE_PHY_COMMON_RESET 0x004
58#define PCIE_PHY_CMN_REG 0x008
59#define PCIE_PHY_MAC_RESET 0x00c
60#define PCIE_PHY_PLL_LOCKED 0x010
61#define PCIE_PHY_TRSVREG_RESET 0x020
62#define PCIE_PHY_TRSV_RESET 0x024
63
64/* PCIe PHY registers */
65#define PCIE_PHY_IMPEDANCE 0x004
66#define PCIE_PHY_PLL_DIV_0 0x008
67#define PCIE_PHY_PLL_BIAS 0x00c
68#define PCIE_PHY_DCC_FEEDBACK 0x014
69#define PCIE_PHY_PLL_DIV_1 0x05c
70#define PCIE_PHY_COMMON_POWER 0x064
71#define PCIE_PHY_COMMON_PD_CMN BIT(3)
72#define PCIE_PHY_TRSV0_EMP_LVL 0x084
73#define PCIE_PHY_TRSV0_DRV_LVL 0x088
74#define PCIE_PHY_TRSV0_RXCDR 0x0ac
75#define PCIE_PHY_TRSV0_POWER 0x0c4
76#define PCIE_PHY_TRSV0_PD_TSV BIT(7)
77#define PCIE_PHY_TRSV0_LVCC 0x0dc
78#define PCIE_PHY_TRSV1_EMP_LVL 0x144
79#define PCIE_PHY_TRSV1_RXCDR 0x16c
80#define PCIE_PHY_TRSV1_POWER 0x184
81#define PCIE_PHY_TRSV1_PD_TSV BIT(7)
82#define PCIE_PHY_TRSV1_LVCC 0x19c
83#define PCIE_PHY_TRSV2_EMP_LVL 0x204
84#define PCIE_PHY_TRSV2_RXCDR 0x22c
85#define PCIE_PHY_TRSV2_POWER 0x244
86#define PCIE_PHY_TRSV2_PD_TSV BIT(7)
87#define PCIE_PHY_TRSV2_LVCC 0x25c
88#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
89#define PCIE_PHY_TRSV3_RXCDR 0x2ec
90#define PCIE_PHY_TRSV3_POWER 0x304
91#define PCIE_PHY_TRSV3_PD_TSV BIT(7)
92#define PCIE_PHY_TRSV3_LVCC 0x31c
93
94struct exynos_pcie_mem_res { 55struct exynos_pcie_mem_res {
95 void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */ 56 void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
96 void __iomem *phy_base; /* DT 1st resource: PHY CTRL */
97 void __iomem *block_base; /* DT 2nd resource: PHY ADDITIONAL CTRL */
98}; 57};
99 58
100struct exynos_pcie_clk_res { 59struct exynos_pcie_clk_res {
@@ -109,8 +68,6 @@ struct exynos_pcie {
109 const struct exynos_pcie_ops *ops; 68 const struct exynos_pcie_ops *ops;
110 int reset_gpio; 69 int reset_gpio;
111 70
112 /* For Generic PHY Framework */
113 bool using_phy;
114 struct phy *phy; 71 struct phy *phy;
115}; 72};
116 73
@@ -138,20 +95,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
138 if (IS_ERR(ep->mem_res->elbi_base)) 95 if (IS_ERR(ep->mem_res->elbi_base))
139 return PTR_ERR(ep->mem_res->elbi_base); 96 return PTR_ERR(ep->mem_res->elbi_base);
140 97
141 /* If using the PHY framework, doesn't need to get other resource */
142 if (ep->using_phy)
143 return 0;
144
145 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
146 ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
147 if (IS_ERR(ep->mem_res->phy_base))
148 return PTR_ERR(ep->mem_res->phy_base);
149
150 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
151 ep->mem_res->block_base = devm_ioremap_resource(dev, res);
152 if (IS_ERR(ep->mem_res->block_base))
153 return PTR_ERR(ep->mem_res->block_base);
154
155 return 0; 98 return 0;
156} 99}
157 100
@@ -276,111 +219,6 @@ static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
276 exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET); 219 exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
277 exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET); 220 exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
278 exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET); 221 exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
279 exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_MAC_RESET);
280}
281
282static void exynos_pcie_assert_phy_reset(struct exynos_pcie *ep)
283{
284 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_MAC_RESET);
285 exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_GLOBAL_RESET);
286}
287
288static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *ep)
289{
290 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_GLOBAL_RESET);
291 exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_PWR_RESET);
292 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
293 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_CMN_REG);
294 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSVREG_RESET);
295 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSV_RESET);
296}
297
298static void exynos_pcie_power_on_phy(struct exynos_pcie *ep)
299{
300 u32 val;
301
302 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
303 val &= ~PCIE_PHY_COMMON_PD_CMN;
304 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);
305
306 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
307 val &= ~PCIE_PHY_TRSV0_PD_TSV;
308 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);
309
310 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
311 val &= ~PCIE_PHY_TRSV1_PD_TSV;
312 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);
313
314 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
315 val &= ~PCIE_PHY_TRSV2_PD_TSV;
316 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);
317
318 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
319 val &= ~PCIE_PHY_TRSV3_PD_TSV;
320 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
321}
322
323static void exynos_pcie_power_off_phy(struct exynos_pcie *ep)
324{
325 u32 val;
326
327 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
328 val |= PCIE_PHY_COMMON_PD_CMN;
329 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);
330
331 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
332 val |= PCIE_PHY_TRSV0_PD_TSV;
333 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);
334
335 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
336 val |= PCIE_PHY_TRSV1_PD_TSV;
337 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);
338
339 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
340 val |= PCIE_PHY_TRSV2_PD_TSV;
341 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);
342
343 val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
344 val |= PCIE_PHY_TRSV3_PD_TSV;
345 exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
346}
347
348static void exynos_pcie_init_phy(struct exynos_pcie *ep)
349{
350 /* DCC feedback control off */
351 exynos_pcie_writel(ep->mem_res->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);
352
353 /* set TX/RX impedance */
354 exynos_pcie_writel(ep->mem_res->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);
355
356 /* set 50Mhz PHY clock */
357 exynos_pcie_writel(ep->mem_res->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
358 exynos_pcie_writel(ep->mem_res->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);
359
360 /* set TX Differential output for lane 0 */
361 exynos_pcie_writel(ep->mem_res->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
362
363 /* set TX Pre-emphasis Level Control for lane 0 to minimum */
364 exynos_pcie_writel(ep->mem_res->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
365
366 /* set RX clock and data recovery bandwidth */
367 exynos_pcie_writel(ep->mem_res->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
368 exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
369 exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
370 exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
371 exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);
372
373 /* change TX Pre-emphasis Level Control for lanes */
374 exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
375 exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
376 exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
377 exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
378
379 /* set LVCC */
380 exynos_pcie_writel(ep->mem_res->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
381 exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
382 exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
383 exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
384} 222}
385 223
386static void exynos_pcie_assert_reset(struct exynos_pcie *ep) 224static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
@@ -398,7 +236,6 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
398 struct dw_pcie *pci = ep->pci; 236 struct dw_pcie *pci = ep->pci;
399 struct pcie_port *pp = &pci->pp; 237 struct pcie_port *pp = &pci->pp;
400 struct device *dev = pci->dev; 238 struct device *dev = pci->dev;
401 u32 val;
402 239
403 if (dw_pcie_link_up(pci)) { 240 if (dw_pcie_link_up(pci)) {
404 dev_err(dev, "Link already up\n"); 241 dev_err(dev, "Link already up\n");
@@ -407,32 +244,13 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
407 244
408 exynos_pcie_assert_core_reset(ep); 245 exynos_pcie_assert_core_reset(ep);
409 246
410 if (ep->using_phy) { 247 phy_reset(ep->phy);
411 phy_reset(ep->phy);
412
413 exynos_pcie_writel(ep->mem_res->elbi_base, 1,
414 PCIE_PWR_RESET);
415
416 phy_power_on(ep->phy);
417 phy_init(ep->phy);
418 } else {
419 exynos_pcie_assert_phy_reset(ep);
420 exynos_pcie_deassert_phy_reset(ep);
421 exynos_pcie_power_on_phy(ep);
422 exynos_pcie_init_phy(ep);
423
424 /* pulse for common reset */
425 exynos_pcie_writel(ep->mem_res->block_base, 1,
426 PCIE_PHY_COMMON_RESET);
427 udelay(500);
428 exynos_pcie_writel(ep->mem_res->block_base, 0,
429 PCIE_PHY_COMMON_RESET);
430 }
431 248
432 /* pulse for common reset */ 249 exynos_pcie_writel(ep->mem_res->elbi_base, 1,
433 exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_COMMON_RESET); 250 PCIE_PWR_RESET);
434 udelay(500); 251
435 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET); 252 phy_power_on(ep->phy);
253 phy_init(ep->phy);
436 254
437 exynos_pcie_deassert_core_reset(ep); 255 exynos_pcie_deassert_core_reset(ep);
438 dw_pcie_setup_rc(pp); 256 dw_pcie_setup_rc(pp);
@@ -446,18 +264,7 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
446 if (!dw_pcie_wait_for_link(pci)) 264 if (!dw_pcie_wait_for_link(pci))
447 return 0; 265 return 0;
448 266
449 if (ep->using_phy) { 267 phy_power_off(ep->phy);
450 phy_power_off(ep->phy);
451 return -ETIMEDOUT;
452 }
453
454 while (exynos_pcie_readl(ep->mem_res->phy_base,
455 PCIE_PHY_PLL_LOCKED) == 0) {
456 val = exynos_pcie_readl(ep->mem_res->block_base,
457 PCIE_PHY_PLL_LOCKED);
458 dev_info(dev, "PLL Locked: 0x%x\n", val);
459 }
460 exynos_pcie_power_off_phy(ep);
461 return -ETIMEDOUT; 268 return -ETIMEDOUT;
462} 269}
463 270
@@ -675,16 +482,13 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
675 482
676 ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); 483 ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
677 484
678 /* Assume that controller doesn't use the PHY framework */
679 ep->using_phy = false;
680
681 ep->phy = devm_of_phy_get(dev, np, NULL); 485 ep->phy = devm_of_phy_get(dev, np, NULL);
682 if (IS_ERR(ep->phy)) { 486 if (IS_ERR(ep->phy)) {
683 if (PTR_ERR(ep->phy) == -EPROBE_DEFER) 487 if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
684 return PTR_ERR(ep->phy); 488 return PTR_ERR(ep->phy);
685 dev_warn(dev, "Use the 'phy' property. Current DT of pci-exynos was deprecated!!\n"); 489
686 } else 490 ep->phy = NULL;
687 ep->using_phy = true; 491 }
688 492
689 if (ep->ops && ep->ops->get_mem_resources) { 493 if (ep->ops && ep->ops->get_mem_resources) {
690 ret = ep->ops->get_mem_resources(pdev, ep); 494 ret = ep->ops->get_mem_resources(pdev, ep);
@@ -692,7 +496,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
692 return ret; 496 return ret;
693 } 497 }
694 498
695 if (ep->ops && ep->ops->get_clk_resources) { 499 if (ep->ops && ep->ops->get_clk_resources &&
500 ep->ops->init_clk_resources) {
696 ret = ep->ops->get_clk_resources(ep); 501 ret = ep->ops->get_clk_resources(ep);
697 if (ret) 502 if (ret)
698 return ret; 503 return ret;
@@ -710,8 +515,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
710 return 0; 515 return 0;
711 516
712fail_probe: 517fail_probe:
713 if (ep->using_phy) 518 phy_exit(ep->phy);
714 phy_exit(ep->phy);
715 519
716 if (ep->ops && ep->ops->deinit_clk_resources) 520 if (ep->ops && ep->ops->deinit_clk_resources)
717 ep->ops->deinit_clk_resources(ep); 521 ep->ops->deinit_clk_resources(ep);
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index 8d8d49e44c50..d4f8ab90c018 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -175,7 +175,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
175 } 175 }
176 176
177 /* interrupt controller is in a child node */ 177 /* interrupt controller is in a child node */
178 *np_temp = of_find_node_by_name(np_pcie, controller); 178 *np_temp = of_get_child_by_name(np_pcie, controller);
179 if (!(*np_temp)) { 179 if (!(*np_temp)) {
180 dev_err(dev, "Node for %s is absent\n", controller); 180 dev_err(dev, "Node for %s is absent\n", controller);
181 return -EINVAL; 181 return -EINVAL;
@@ -184,6 +184,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
184 temp = of_irq_count(*np_temp); 184 temp = of_irq_count(*np_temp);
185 if (!temp) { 185 if (!temp) {
186 dev_err(dev, "No IRQ entries in %s\n", controller); 186 dev_err(dev, "No IRQ entries in %s\n", controller);
187 of_node_put(*np_temp);
187 return -EINVAL; 188 return -EINVAL;
188 } 189 }
189 190
@@ -201,6 +202,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
201 break; 202 break;
202 } 203 }
203 204
205 of_node_put(*np_temp);
206
204 if (temp) { 207 if (temp) {
205 *num_irqs = temp; 208 *num_irqs = temp;
206 return 0; 209 return 0;
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index b89884919423..93b3df9ed1b5 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -10,6 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/of_device.h>
13#include <linux/pci.h> 14#include <linux/pci.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
15#include <linux/resource.h> 16#include <linux/resource.h>
@@ -23,44 +24,72 @@
23 24
24#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev) 25#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev)
25 26
27enum artpec_pcie_variants {
28 ARTPEC6,
29 ARTPEC7,
30};
31
26struct artpec6_pcie { 32struct artpec6_pcie {
27 struct dw_pcie *pci; 33 struct dw_pcie *pci;
28 struct regmap *regmap; /* DT axis,syscon-pcie */ 34 struct regmap *regmap; /* DT axis,syscon-pcie */
29 void __iomem *phy_base; /* DT phy */ 35 void __iomem *phy_base; /* DT phy */
36 enum artpec_pcie_variants variant;
37 enum dw_pcie_device_mode mode;
38};
39
40struct artpec_pcie_of_data {
41 enum artpec_pcie_variants variant;
42 enum dw_pcie_device_mode mode;
30}; 43};
31 44
45static const struct of_device_id artpec6_pcie_of_match[];
46
32/* PCIe Port Logic registers (memory-mapped) */ 47/* PCIe Port Logic registers (memory-mapped) */
33#define PL_OFFSET 0x700 48#define PL_OFFSET 0x700
34#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
35#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
36 49
37#define MISC_CONTROL_1_OFF (PL_OFFSET + 0x1bc) 50#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
38#define DBI_RO_WR_EN 1 51#define ACK_N_FTS_MASK GENMASK(15, 8)
52#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
53
54#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0)
55#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK)
39 56
40/* ARTPEC-6 specific registers */ 57/* ARTPEC-6 specific registers */
41#define PCIECFG 0x18 58#define PCIECFG 0x18
42#define PCIECFG_DBG_OEN (1 << 24) 59#define PCIECFG_DBG_OEN BIT(24)
43#define PCIECFG_CORE_RESET_REQ (1 << 21) 60#define PCIECFG_CORE_RESET_REQ BIT(21)
44#define PCIECFG_LTSSM_ENABLE (1 << 20) 61#define PCIECFG_LTSSM_ENABLE BIT(20)
45#define PCIECFG_CLKREQ_B (1 << 11) 62#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16)
46#define PCIECFG_REFCLK_ENABLE (1 << 10) 63#define PCIECFG_CLKREQ_B BIT(11)
47#define PCIECFG_PLL_ENABLE (1 << 9) 64#define PCIECFG_REFCLK_ENABLE BIT(10)
48#define PCIECFG_PCLK_ENABLE (1 << 8) 65#define PCIECFG_PLL_ENABLE BIT(9)
49#define PCIECFG_RISRCREN (1 << 4) 66#define PCIECFG_PCLK_ENABLE BIT(8)
50#define PCIECFG_MODE_TX_DRV_EN (1 << 3) 67#define PCIECFG_RISRCREN BIT(4)
51#define PCIECFG_CISRREN (1 << 2) 68#define PCIECFG_MODE_TX_DRV_EN BIT(3)
52#define PCIECFG_MACRO_ENABLE (1 << 0) 69#define PCIECFG_CISRREN BIT(2)
70#define PCIECFG_MACRO_ENABLE BIT(0)
71/* ARTPEC-7 specific fields */
72#define PCIECFG_REFCLKSEL BIT(23)
73#define PCIECFG_NOC_RESET BIT(3)
74
75#define PCIESTAT 0x1c
76/* ARTPEC-7 specific fields */
77#define PCIESTAT_EXTREFCLK BIT(3)
53 78
54#define NOCCFG 0x40 79#define NOCCFG 0x40
55#define NOCCFG_ENABLE_CLK_PCIE (1 << 4) 80#define NOCCFG_ENABLE_CLK_PCIE BIT(4)
56#define NOCCFG_POWER_PCIE_IDLEACK (1 << 3) 81#define NOCCFG_POWER_PCIE_IDLEACK BIT(3)
57#define NOCCFG_POWER_PCIE_IDLE (1 << 2) 82#define NOCCFG_POWER_PCIE_IDLE BIT(2)
58#define NOCCFG_POWER_PCIE_IDLEREQ (1 << 1) 83#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1)
59 84
60#define PHY_STATUS 0x118 85#define PHY_STATUS 0x118
61#define PHY_COSPLLLOCK (1 << 0) 86#define PHY_COSPLLLOCK BIT(0)
62 87
63#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff 88#define PHY_TX_ASIC_OUT 0x4040
89#define PHY_TX_ASIC_OUT_TX_ACK BIT(0)
90
91#define PHY_RX_ASIC_OUT 0x405c
92#define PHY_RX_ASIC_OUT_ACK BIT(0)
64 93
65static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) 94static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
66{ 95{
@@ -75,22 +104,123 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
75 regmap_write(artpec6_pcie->regmap, offset, val); 104 regmap_write(artpec6_pcie->regmap, offset, val);
76} 105}
77 106
78static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr) 107static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
79{ 108{
80 return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR; 109 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
110 struct pcie_port *pp = &pci->pp;
111 struct dw_pcie_ep *ep = &pci->ep;
112
113 switch (artpec6_pcie->mode) {
114 case DW_PCIE_RC_TYPE:
115 return pci_addr - pp->cfg0_base;
116 case DW_PCIE_EP_TYPE:
117 return pci_addr - ep->phys_base;
118 default:
119 dev_err(pci->dev, "UNKNOWN device type\n");
120 }
121 return pci_addr;
81} 122}
82 123
83static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) 124static int artpec6_pcie_establish_link(struct dw_pcie *pci)
84{ 125{
85 struct dw_pcie *pci = artpec6_pcie->pci; 126 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
86 struct pcie_port *pp = &pci->pp; 127 u32 val;
128
129 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
130 val |= PCIECFG_LTSSM_ENABLE;
131 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
132
133 return 0;
134}
135
136static void artpec6_pcie_stop_link(struct dw_pcie *pci)
137{
138 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
87 u32 val; 139 u32 val;
88 unsigned int retries;
89 140
90 /* Hold DW core in reset */
91 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); 141 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
92 val |= PCIECFG_CORE_RESET_REQ; 142 val &= ~PCIECFG_LTSSM_ENABLE;
93 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); 143 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
144}
145
146static const struct dw_pcie_ops dw_pcie_ops = {
147 .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
148 .start_link = artpec6_pcie_establish_link,
149 .stop_link = artpec6_pcie_stop_link,
150};
151
152static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
153{
154 struct dw_pcie *pci = artpec6_pcie->pci;
155 struct device *dev = pci->dev;
156 u32 val;
157 unsigned int retries;
158
159 retries = 50;
160 do {
161 usleep_range(1000, 2000);
162 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
163 retries--;
164 } while (retries &&
165 (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
166 if (!retries)
167 dev_err(dev, "PCIe clock manager did not leave idle state\n");
168
169 retries = 50;
170 do {
171 usleep_range(1000, 2000);
172 val = readl(artpec6_pcie->phy_base + PHY_STATUS);
173 retries--;
174 } while (retries && !(val & PHY_COSPLLLOCK));
175 if (!retries)
176 dev_err(dev, "PHY PLL did not lock\n");
177}
178
179static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
180{
181 struct dw_pcie *pci = artpec6_pcie->pci;
182 struct device *dev = pci->dev;
183 u32 val;
184 u16 phy_status_tx, phy_status_rx;
185 unsigned int retries;
186
187 retries = 50;
188 do {
189 usleep_range(1000, 2000);
190 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
191 retries--;
192 } while (retries &&
193 (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
194 if (!retries)
195 dev_err(dev, "PCIe clock manager did not leave idle state\n");
196
197 retries = 50;
198 do {
199 usleep_range(1000, 2000);
200 phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
201 phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
202 retries--;
203 } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
204 (phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
205 if (!retries)
206 dev_err(dev, "PHY did not enter Pn state\n");
207}
208
209static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
210{
211 switch (artpec6_pcie->variant) {
212 case ARTPEC6:
213 artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
214 break;
215 case ARTPEC7:
216 artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
217 break;
218 }
219}
220
221static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
222{
223 u32 val;
94 224
95 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); 225 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
96 val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ 226 val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
@@ -116,45 +246,110 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
116 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); 246 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
117 val &= ~NOCCFG_POWER_PCIE_IDLEREQ; 247 val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
118 artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); 248 artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
249}
119 250
120 retries = 50; 251static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
121 do { 252{
122 usleep_range(1000, 2000); 253 struct dw_pcie *pci = artpec6_pcie->pci;
123 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); 254 u32 val;
124 retries--; 255 bool extrefclk;
125 } while (retries &&
126 (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
127 256
128 retries = 50; 257 /* Check if external reference clock is connected */
129 do { 258 val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
130 usleep_range(1000, 2000); 259 extrefclk = !!(val & PCIESTAT_EXTREFCLK);
131 val = readl(artpec6_pcie->phy_base + PHY_STATUS); 260 dev_dbg(pci->dev, "Using reference clock: %s\n",
132 retries--; 261 extrefclk ? "external" : "internal");
133 } while (retries && !(val & PHY_COSPLLLOCK));
134 262
135 /* Take DW core out of reset */
136 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); 263 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
137 val &= ~PCIECFG_CORE_RESET_REQ; 264 val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
265 PCIECFG_PCLK_ENABLE;
266 if (extrefclk)
267 val |= PCIECFG_REFCLKSEL;
268 else
269 val &= ~PCIECFG_REFCLKSEL;
138 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); 270 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
139 usleep_range(100, 200); 271 usleep_range(10, 20);
140 272
141 /* setup root complex */ 273 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
142 dw_pcie_setup_rc(pp); 274 val |= NOCCFG_ENABLE_CLK_PCIE;
275 artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
276 usleep_range(20, 30);
277
278 val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
279 val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
280 artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
281}
282
283static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
284{
285 switch (artpec6_pcie->variant) {
286 case ARTPEC6:
287 artpec6_pcie_init_phy_a6(artpec6_pcie);
288 break;
289 case ARTPEC7:
290 artpec6_pcie_init_phy_a7(artpec6_pcie);
291 break;
292 }
293}
294
295static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
296{
297 struct dw_pcie *pci = artpec6_pcie->pci;
298 u32 val;
299
300 if (artpec6_pcie->variant != ARTPEC7)
301 return;
302
303 /*
304 * Increase the N_FTS (Number of Fast Training Sequences)
305 * to be transmitted when transitioning from L0s to L0.
306 */
307 val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
308 val &= ~ACK_N_FTS_MASK;
309 val |= ACK_N_FTS(180);
310 dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
311
312 /*
313 * Set the Number of Fast Training Sequences that the core
314 * advertises as its N_FTS during Gen2 or Gen3 link training.
315 */
316 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
317 val &= ~FAST_TRAINING_SEQ_MASK;
318 val |= FAST_TRAINING_SEQ(180);
319 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
320}
321
322static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
323{
324 u32 val;
143 325
144 /* assert LTSSM enable */
145 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); 326 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
146 val |= PCIECFG_LTSSM_ENABLE; 327 switch (artpec6_pcie->variant) {
328 case ARTPEC6:
329 val |= PCIECFG_CORE_RESET_REQ;
330 break;
331 case ARTPEC7:
332 val &= ~PCIECFG_NOC_RESET;
333 break;
334 }
147 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); 335 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
336}
148 337
149 /* check if the link is up or not */ 338static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
150 if (!dw_pcie_wait_for_link(pci)) 339{
151 return 0; 340 u32 val;
152
153 dev_dbg(pci->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
154 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
155 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
156 341
157 return -ETIMEDOUT; 342 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
343 switch (artpec6_pcie->variant) {
344 case ARTPEC6:
345 val &= ~PCIECFG_CORE_RESET_REQ;
346 break;
347 case ARTPEC7:
348 val |= PCIECFG_NOC_RESET;
349 break;
350 }
351 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
352 usleep_range(100, 200);
158} 353}
159 354
160static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) 355static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
@@ -171,7 +366,14 @@ static int artpec6_pcie_host_init(struct pcie_port *pp)
171 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 366 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
172 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); 367 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
173 368
174 artpec6_pcie_establish_link(artpec6_pcie); 369 artpec6_pcie_assert_core_reset(artpec6_pcie);
370 artpec6_pcie_init_phy(artpec6_pcie);
371 artpec6_pcie_deassert_core_reset(artpec6_pcie);
372 artpec6_pcie_wait_for_phy(artpec6_pcie);
373 artpec6_pcie_set_nfts(artpec6_pcie);
374 dw_pcie_setup_rc(pp);
375 artpec6_pcie_establish_link(pci);
376 dw_pcie_wait_for_link(pci);
175 artpec6_pcie_enable_interrupts(artpec6_pcie); 377 artpec6_pcie_enable_interrupts(artpec6_pcie);
176 378
177 return 0; 379 return 0;
@@ -227,10 +429,78 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
227 return 0; 429 return 0;
228} 430}
229 431
230static const struct dw_pcie_ops dw_pcie_ops = { 432static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
231 .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, 433{
434 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
435 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
436 enum pci_barno bar;
437
438 artpec6_pcie_assert_core_reset(artpec6_pcie);
439 artpec6_pcie_init_phy(artpec6_pcie);
440 artpec6_pcie_deassert_core_reset(artpec6_pcie);
441 artpec6_pcie_wait_for_phy(artpec6_pcie);
442 artpec6_pcie_set_nfts(artpec6_pcie);
443
444 for (bar = BAR_0; bar <= BAR_5; bar++)
445 dw_pcie_ep_reset_bar(pci, bar);
446}
447
448static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
449 enum pci_epc_irq_type type, u8 interrupt_num)
450{
451 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
452
453 switch (type) {
454 case PCI_EPC_IRQ_LEGACY:
455 dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
456 return -EINVAL;
457 case PCI_EPC_IRQ_MSI:
458 return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
459 default:
460 dev_err(pci->dev, "UNKNOWN IRQ type\n");
461 }
462
463 return 0;
464}
465
466static struct dw_pcie_ep_ops pcie_ep_ops = {
467 .ep_init = artpec6_pcie_ep_init,
468 .raise_irq = artpec6_pcie_raise_irq,
232}; 469};
233 470
471static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
472 struct platform_device *pdev)
473{
474 int ret;
475 struct dw_pcie_ep *ep;
476 struct resource *res;
477 struct device *dev = &pdev->dev;
478 struct dw_pcie *pci = artpec6_pcie->pci;
479
480 ep = &pci->ep;
481 ep->ops = &pcie_ep_ops;
482
483 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
484 pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
485 if (!pci->dbi_base2)
486 return -ENOMEM;
487
488 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
489 if (!res)
490 return -EINVAL;
491
492 ep->phys_base = res->start;
493 ep->addr_size = resource_size(res);
494
495 ret = dw_pcie_ep_init(ep);
496 if (ret) {
497 dev_err(dev, "failed to initialize endpoint\n");
498 return ret;
499 }
500
501 return 0;
502}
503
234static int artpec6_pcie_probe(struct platform_device *pdev) 504static int artpec6_pcie_probe(struct platform_device *pdev)
235{ 505{
236 struct device *dev = &pdev->dev; 506 struct device *dev = &pdev->dev;
@@ -239,6 +509,18 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
239 struct resource *dbi_base; 509 struct resource *dbi_base;
240 struct resource *phy_base; 510 struct resource *phy_base;
241 int ret; 511 int ret;
512 const struct of_device_id *match;
513 const struct artpec_pcie_of_data *data;
514 enum artpec_pcie_variants variant;
515 enum dw_pcie_device_mode mode;
516
517 match = of_match_device(artpec6_pcie_of_match, dev);
518 if (!match)
519 return -EINVAL;
520
521 data = (struct artpec_pcie_of_data *)match->data;
522 variant = (enum artpec_pcie_variants)data->variant;
523 mode = (enum dw_pcie_device_mode)data->mode;
242 524
243 artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); 525 artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
244 if (!artpec6_pcie) 526 if (!artpec6_pcie)
@@ -252,6 +534,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
252 pci->ops = &dw_pcie_ops; 534 pci->ops = &dw_pcie_ops;
253 535
254 artpec6_pcie->pci = pci; 536 artpec6_pcie->pci = pci;
537 artpec6_pcie->variant = variant;
538 artpec6_pcie->mode = mode;
255 539
256 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 540 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
257 pci->dbi_base = devm_ioremap_resource(dev, dbi_base); 541 pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
@@ -271,15 +555,73 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
271 555
272 platform_set_drvdata(pdev, artpec6_pcie); 556 platform_set_drvdata(pdev, artpec6_pcie);
273 557
274 ret = artpec6_add_pcie_port(artpec6_pcie, pdev); 558 switch (artpec6_pcie->mode) {
275 if (ret < 0) 559 case DW_PCIE_RC_TYPE:
276 return ret; 560 if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
561 return -ENODEV;
562
563 ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
564 if (ret < 0)
565 return ret;
566 break;
567 case DW_PCIE_EP_TYPE: {
568 u32 val;
569
570 if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
571 return -ENODEV;
572
573 val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
574 val &= ~PCIECFG_DEVICE_TYPE_MASK;
575 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
576 ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
577 if (ret < 0)
578 return ret;
579 break;
580 }
581 default:
582 dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
583 }
277 584
278 return 0; 585 return 0;
279} 586}
280 587
588static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
589 .variant = ARTPEC6,
590 .mode = DW_PCIE_RC_TYPE,
591};
592
593static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
594 .variant = ARTPEC6,
595 .mode = DW_PCIE_EP_TYPE,
596};
597
598static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
599 .variant = ARTPEC7,
600 .mode = DW_PCIE_RC_TYPE,
601};
602
603static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
604 .variant = ARTPEC7,
605 .mode = DW_PCIE_EP_TYPE,
606};
607
281static const struct of_device_id artpec6_pcie_of_match[] = { 608static const struct of_device_id artpec6_pcie_of_match[] = {
282 { .compatible = "axis,artpec6-pcie", }, 609 {
610 .compatible = "axis,artpec6-pcie",
611 .data = &artpec6_pcie_rc_of_data,
612 },
613 {
614 .compatible = "axis,artpec6-pcie-ep",
615 .data = &artpec6_pcie_ep_of_data,
616 },
617 {
618 .compatible = "axis,artpec7-pcie",
619 .data = &artpec7_pcie_rc_of_data,
620 },
621 {
622 .compatible = "axis,artpec7-pcie-ep",
623 .data = &artpec7_pcie_ep_of_data,
624 },
283 {}, 625 {},
284}; 626};
285 627
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index 2d71a2262a2a..3a6feeff5f5b 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -19,21 +19,24 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
19 pci_epc_linkup(epc); 19 pci_epc_linkup(epc);
20} 20}
21 21
22static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) 22void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
23{ 23{
24 u32 reg; 24 u32 reg;
25 25
26 reg = PCI_BASE_ADDRESS_0 + (4 * bar); 26 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
27 dw_pcie_dbi_ro_wr_en(pci);
27 dw_pcie_writel_dbi2(pci, reg, 0x0); 28 dw_pcie_writel_dbi2(pci, reg, 0x0);
28 dw_pcie_writel_dbi(pci, reg, 0x0); 29 dw_pcie_writel_dbi(pci, reg, 0x0);
30 dw_pcie_dbi_ro_wr_dis(pci);
29} 31}
30 32
31static int dw_pcie_ep_write_header(struct pci_epc *epc, 33static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
32 struct pci_epf_header *hdr) 34 struct pci_epf_header *hdr)
33{ 35{
34 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 36 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
35 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 37 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
36 38
39 dw_pcie_dbi_ro_wr_en(pci);
37 dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); 40 dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
38 dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); 41 dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
39 dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); 42 dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
@@ -47,6 +50,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc,
47 dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); 50 dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
48 dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, 51 dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
49 hdr->interrupt_pin); 52 hdr->interrupt_pin);
53 dw_pcie_dbi_ro_wr_dis(pci);
50 54
51 return 0; 55 return 0;
52} 56}
@@ -59,8 +63,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
59 u32 free_win; 63 u32 free_win;
60 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 64 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
61 65
62 free_win = find_first_zero_bit(&ep->ib_window_map, 66 free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
63 sizeof(ep->ib_window_map));
64 if (free_win >= ep->num_ib_windows) { 67 if (free_win >= ep->num_ib_windows) {
65 dev_err(pci->dev, "no free inbound window\n"); 68 dev_err(pci->dev, "no free inbound window\n");
66 return -EINVAL; 69 return -EINVAL;
@@ -74,7 +77,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
74 } 77 }
75 78
76 ep->bar_to_atu[bar] = free_win; 79 ep->bar_to_atu[bar] = free_win;
77 set_bit(free_win, &ep->ib_window_map); 80 set_bit(free_win, ep->ib_window_map);
78 81
79 return 0; 82 return 0;
80} 83}
@@ -85,8 +88,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
85 u32 free_win; 88 u32 free_win;
86 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 89 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
87 90
88 free_win = find_first_zero_bit(&ep->ob_window_map, 91 free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
89 sizeof(ep->ob_window_map));
90 if (free_win >= ep->num_ob_windows) { 92 if (free_win >= ep->num_ob_windows) {
91 dev_err(pci->dev, "no free outbound window\n"); 93 dev_err(pci->dev, "no free outbound window\n");
92 return -EINVAL; 94 return -EINVAL;
@@ -95,13 +97,14 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
95 dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, 97 dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
96 phys_addr, pci_addr, size); 98 phys_addr, pci_addr, size);
97 99
98 set_bit(free_win, &ep->ob_window_map); 100 set_bit(free_win, ep->ob_window_map);
99 ep->outbound_addr[free_win] = phys_addr; 101 ep->outbound_addr[free_win] = phys_addr;
100 102
101 return 0; 103 return 0;
102} 104}
103 105
104static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar) 106static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
107 enum pci_barno bar)
105{ 108{
106 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 109 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
107 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 110 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -110,10 +113,11 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
110 dw_pcie_ep_reset_bar(pci, bar); 113 dw_pcie_ep_reset_bar(pci, bar);
111 114
112 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); 115 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
113 clear_bit(atu_index, &ep->ib_window_map); 116 clear_bit(atu_index, ep->ib_window_map);
114} 117}
115 118
116static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar, 119static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
120 enum pci_barno bar,
117 dma_addr_t bar_phys, size_t size, int flags) 121 dma_addr_t bar_phys, size_t size, int flags)
118{ 122{
119 int ret; 123 int ret;
@@ -131,8 +135,10 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
131 if (ret) 135 if (ret)
132 return ret; 136 return ret;
133 137
138 dw_pcie_dbi_ro_wr_en(pci);
134 dw_pcie_writel_dbi2(pci, reg, size - 1); 139 dw_pcie_writel_dbi2(pci, reg, size - 1);
135 dw_pcie_writel_dbi(pci, reg, flags); 140 dw_pcie_writel_dbi(pci, reg, flags);
141 dw_pcie_dbi_ro_wr_dis(pci);
136 142
137 return 0; 143 return 0;
138} 144}
@@ -152,7 +158,8 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
152 return -EINVAL; 158 return -EINVAL;
153} 159}
154 160
155static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr) 161static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
162 phys_addr_t addr)
156{ 163{
157 int ret; 164 int ret;
158 u32 atu_index; 165 u32 atu_index;
@@ -164,10 +171,11 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
164 return; 171 return;
165 172
166 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); 173 dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
167 clear_bit(atu_index, &ep->ob_window_map); 174 clear_bit(atu_index, ep->ob_window_map);
168} 175}
169 176
170static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr, 177static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
178 phys_addr_t addr,
171 u64 pci_addr, size_t size) 179 u64 pci_addr, size_t size)
172{ 180{
173 int ret; 181 int ret;
@@ -183,39 +191,37 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
183 return 0; 191 return 0;
184} 192}
185 193
186static int dw_pcie_ep_get_msi(struct pci_epc *epc) 194static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
187{ 195{
188 int val; 196 int val;
189 u32 lower_addr;
190 u32 upper_addr;
191 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 197 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
192 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 198 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
193 199
194 val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL); 200 val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
195 val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; 201 if (!(val & MSI_CAP_MSI_EN_MASK))
196
197 lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
198 upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
199
200 if (!(lower_addr || upper_addr))
201 return -EINVAL; 202 return -EINVAL;
202 203
204 val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
203 return val; 205 return val;
204} 206}
205 207
206static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int) 208static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
207{ 209{
208 int val; 210 int val;
209 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 211 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
210 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 212 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
211 213
212 val = (encode_int << MSI_CAP_MMC_SHIFT); 214 val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
215 val &= ~MSI_CAP_MMC_MASK;
216 val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
217 dw_pcie_dbi_ro_wr_en(pci);
213 dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val); 218 dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
219 dw_pcie_dbi_ro_wr_dis(pci);
214 220
215 return 0; 221 return 0;
216} 222}
217 223
218static int dw_pcie_ep_raise_irq(struct pci_epc *epc, 224static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
219 enum pci_epc_irq_type type, u8 interrupt_num) 225 enum pci_epc_irq_type type, u8 interrupt_num)
220{ 226{
221 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 227 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -223,7 +229,7 @@ static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
223 if (!ep->ops->raise_irq) 229 if (!ep->ops->raise_irq)
224 return -EINVAL; 230 return -EINVAL;
225 231
226 return ep->ops->raise_irq(ep, type, interrupt_num); 232 return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
227} 233}
228 234
229static void dw_pcie_ep_stop(struct pci_epc *epc) 235static void dw_pcie_ep_stop(struct pci_epc *epc)
@@ -261,10 +267,48 @@ static const struct pci_epc_ops epc_ops = {
261 .stop = dw_pcie_ep_stop, 267 .stop = dw_pcie_ep_stop,
262}; 268};
263 269
270int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
271 u8 interrupt_num)
272{
273 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
274 struct pci_epc *epc = ep->epc;
275 u16 msg_ctrl, msg_data;
276 u32 msg_addr_lower, msg_addr_upper;
277 u64 msg_addr;
278 bool has_upper;
279 int ret;
280
281 /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
282 msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
283 has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
284 msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
285 if (has_upper) {
286 msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
287 msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
288 } else {
289 msg_addr_upper = 0;
290 msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
291 }
292 msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
293 ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
294 epc->mem->page_size);
295 if (ret)
296 return ret;
297
298 writel(msg_data | (interrupt_num - 1), ep->msi_mem);
299
300 dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
301
302 return 0;
303}
304
264void dw_pcie_ep_exit(struct dw_pcie_ep *ep) 305void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
265{ 306{
266 struct pci_epc *epc = ep->epc; 307 struct pci_epc *epc = ep->epc;
267 308
309 pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
310 epc->mem->page_size);
311
268 pci_epc_mem_exit(epc); 312 pci_epc_mem_exit(epc);
269} 313}
270 314
@@ -287,12 +331,32 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
287 dev_err(dev, "unable to read *num-ib-windows* property\n"); 331 dev_err(dev, "unable to read *num-ib-windows* property\n");
288 return ret; 332 return ret;
289 } 333 }
334 if (ep->num_ib_windows > MAX_IATU_IN) {
335 dev_err(dev, "invalid *num-ib-windows*\n");
336 return -EINVAL;
337 }
290 338
291 ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); 339 ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
292 if (ret < 0) { 340 if (ret < 0) {
293 dev_err(dev, "unable to read *num-ob-windows* property\n"); 341 dev_err(dev, "unable to read *num-ob-windows* property\n");
294 return ret; 342 return ret;
295 } 343 }
344 if (ep->num_ob_windows > MAX_IATU_OUT) {
345 dev_err(dev, "invalid *num-ob-windows*\n");
346 return -EINVAL;
347 }
348
349 ep->ib_window_map = devm_kzalloc(dev, sizeof(long) *
350 BITS_TO_LONGS(ep->num_ib_windows),
351 GFP_KERNEL);
352 if (!ep->ib_window_map)
353 return -ENOMEM;
354
355 ep->ob_window_map = devm_kzalloc(dev, sizeof(long) *
356 BITS_TO_LONGS(ep->num_ob_windows),
357 GFP_KERNEL);
358 if (!ep->ob_window_map)
359 return -ENOMEM;
296 360
297 addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows, 361 addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
298 GFP_KERNEL); 362 GFP_KERNEL);
@@ -320,6 +384,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
320 return ret; 384 return ret;
321 } 385 }
322 386
387 ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
388 epc->mem->page_size);
389 if (!ep->msi_mem) {
390 dev_err(dev, "Failed to reserve memory for MSI\n");
391 return -ENOMEM;
392 }
393
323 ep->epc = epc; 394 ep->epc = epc;
324 epc_set_drvdata(epc, ep); 395 epc_set_drvdata(epc, ep);
325 dw_pcie_setup(pci); 396 dw_pcie_setup(pci);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 0fc1d1db72f8..8de2d5c69b1d 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -80,10 +80,19 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
80 80
81void dw_pcie_msi_init(struct pcie_port *pp) 81void dw_pcie_msi_init(struct pcie_port *pp)
82{ 82{
83 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
84 struct device *dev = pci->dev;
85 struct page *page;
83 u64 msi_target; 86 u64 msi_target;
84 87
85 pp->msi_data = __get_free_pages(GFP_KERNEL, 0); 88 page = alloc_page(GFP_KERNEL);
86 msi_target = virt_to_phys((void *)pp->msi_data); 89 pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
90 if (dma_mapping_error(dev, pp->msi_data)) {
91 dev_err(dev, "failed to map MSI data\n");
92 __free_page(page);
93 return;
94 }
95 msi_target = (u64)pp->msi_data;
87 96
88 /* program the msi_data */ 97 /* program the msi_data */
89 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, 98 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
@@ -184,7 +193,7 @@ static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
184 if (pp->ops->get_msi_addr) 193 if (pp->ops->get_msi_addr)
185 msi_target = pp->ops->get_msi_addr(pp); 194 msi_target = pp->ops->get_msi_addr(pp);
186 else 195 else
187 msi_target = virt_to_phys((void *)pp->msi_data); 196 msi_target = (u64)pp->msi_data;
188 197
189 msg.address_lo = (u32)(msi_target & 0xffffffff); 198 msg.address_lo = (u32)(msi_target & 0xffffffff);
190 msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff); 199 msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index a3aaabc203e2..1b7282e5b494 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -146,7 +146,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
146 u32 retries, val; 146 u32 retries, val;
147 147
148 if (pci->ops->cpu_addr_fixup) 148 if (pci->ops->cpu_addr_fixup)
149 cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr); 149 cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
150 150
151 if (pci->iatu_unroll_enabled) { 151 if (pci->iatu_unroll_enabled) {
152 dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, 152 dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index 948a461c1ac7..11b13864a406 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -11,6 +11,7 @@
11#ifndef _PCIE_DESIGNWARE_H 11#ifndef _PCIE_DESIGNWARE_H
12#define _PCIE_DESIGNWARE_H 12#define _PCIE_DESIGNWARE_H
13 13
14#include <linux/dma-mapping.h>
14#include <linux/irq.h> 15#include <linux/irq.h>
15#include <linux/msi.h> 16#include <linux/msi.h>
16#include <linux/pci.h> 17#include <linux/pci.h>
@@ -97,10 +98,14 @@
97 98
98#define MSI_MESSAGE_CONTROL 0x52 99#define MSI_MESSAGE_CONTROL 0x52
99#define MSI_CAP_MMC_SHIFT 1 100#define MSI_CAP_MMC_SHIFT 1
101#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
100#define MSI_CAP_MME_SHIFT 4 102#define MSI_CAP_MME_SHIFT 4
103#define MSI_CAP_MSI_EN_MASK 0x1
101#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) 104#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
102#define MSI_MESSAGE_ADDR_L32 0x54 105#define MSI_MESSAGE_ADDR_L32 0x54
103#define MSI_MESSAGE_ADDR_U32 0x58 106#define MSI_MESSAGE_ADDR_U32 0x58
107#define MSI_MESSAGE_DATA_32 0x58
108#define MSI_MESSAGE_DATA_64 0x5C
104 109
105/* 110/*
106 * Maximum number of MSI IRQs can be 256 per controller. But keep 111 * Maximum number of MSI IRQs can be 256 per controller. But keep
@@ -110,6 +115,10 @@
110#define MAX_MSI_IRQS 32 115#define MAX_MSI_IRQS 32
111#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32) 116#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
112 117
118/* Maximum number of inbound/outbound iATUs */
119#define MAX_IATU_IN 256
120#define MAX_IATU_OUT 256
121
113struct pcie_port; 122struct pcie_port;
114struct dw_pcie; 123struct dw_pcie;
115struct dw_pcie_ep; 124struct dw_pcie_ep;
@@ -165,7 +174,7 @@ struct pcie_port {
165 const struct dw_pcie_host_ops *ops; 174 const struct dw_pcie_host_ops *ops;
166 int msi_irq; 175 int msi_irq;
167 struct irq_domain *irq_domain; 176 struct irq_domain *irq_domain;
168 unsigned long msi_data; 177 dma_addr_t msi_data;
169 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); 178 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
170}; 179};
171 180
@@ -177,8 +186,8 @@ enum dw_pcie_as_type {
177 186
178struct dw_pcie_ep_ops { 187struct dw_pcie_ep_ops {
179 void (*ep_init)(struct dw_pcie_ep *ep); 188 void (*ep_init)(struct dw_pcie_ep *ep);
180 int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type, 189 int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
181 u8 interrupt_num); 190 enum pci_epc_irq_type type, u8 interrupt_num);
182}; 191};
183 192
184struct dw_pcie_ep { 193struct dw_pcie_ep {
@@ -189,14 +198,16 @@ struct dw_pcie_ep {
189 size_t page_size; 198 size_t page_size;
190 u8 bar_to_atu[6]; 199 u8 bar_to_atu[6];
191 phys_addr_t *outbound_addr; 200 phys_addr_t *outbound_addr;
192 unsigned long ib_window_map; 201 unsigned long *ib_window_map;
193 unsigned long ob_window_map; 202 unsigned long *ob_window_map;
194 u32 num_ib_windows; 203 u32 num_ib_windows;
195 u32 num_ob_windows; 204 u32 num_ob_windows;
205 void __iomem *msi_mem;
206 phys_addr_t msi_mem_phys;
196}; 207};
197 208
198struct dw_pcie_ops { 209struct dw_pcie_ops {
199 u64 (*cpu_addr_fixup)(u64 cpu_addr); 210 u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
200 u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, 211 u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
201 size_t size); 212 size_t size);
202 void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, 213 void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
@@ -331,6 +342,9 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
331void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); 342void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
332int dw_pcie_ep_init(struct dw_pcie_ep *ep); 343int dw_pcie_ep_init(struct dw_pcie_ep *ep);
333void dw_pcie_ep_exit(struct dw_pcie_ep *ep); 344void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
345int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
346 u8 interrupt_num);
347void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
334#else 348#else
335static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) 349static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
336{ 350{
@@ -344,5 +358,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
344static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) 358static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
345{ 359{
346} 360}
361
362static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
363 u8 interrupt_num)
364{
365 return 0;
366}
367
368static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
369{
370}
347#endif 371#endif
348#endif /* _PCIE_DESIGNWARE_H */ 372#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index b01294e899f9..6310c66e265c 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -163,7 +163,7 @@ struct qcom_pcie {
163 union qcom_pcie_resources res; 163 union qcom_pcie_resources res;
164 struct phy *phy; 164 struct phy *phy;
165 struct gpio_desc *reset; 165 struct gpio_desc *reset;
166 struct qcom_pcie_ops *ops; 166 const struct qcom_pcie_ops *ops;
167}; 167};
168 168
169#define to_qcom_pcie(x) dev_get_drvdata((x)->dev) 169#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -1226,7 +1226,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
1226 1226
1227 pcie->pci = pci; 1227 pcie->pci = pci;
1228 1228
1229 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev); 1229 pcie->ops = of_device_get_match_data(dev);
1230 1230
1231 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); 1231 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
1232 if (IS_ERR(pcie->reset)) 1232 if (IS_ERR(pcie->reset))
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index f9105c7f20e9..64d8a17f8094 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -93,7 +93,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
93 goto err; 93 goto err;
94 } 94 }
95 95
96 ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size); 96 ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
97 reg->size);
97 if (ret) { 98 if (ret) {
98 dev_err(dev, "failed to map source address\n"); 99 dev_err(dev, "failed to map source address\n");
99 reg->status = STATUS_SRC_ADDR_INVALID; 100 reg->status = STATUS_SRC_ADDR_INVALID;
@@ -108,7 +109,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
108 goto err_src_map_addr; 109 goto err_src_map_addr;
109 } 110 }
110 111
111 ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size); 112 ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
113 reg->size);
112 if (ret) { 114 if (ret) {
113 dev_err(dev, "failed to map destination address\n"); 115 dev_err(dev, "failed to map destination address\n");
114 reg->status = STATUS_DST_ADDR_INVALID; 116 reg->status = STATUS_DST_ADDR_INVALID;
@@ -117,13 +119,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
117 119
118 memcpy(dst_addr, src_addr, reg->size); 120 memcpy(dst_addr, src_addr, reg->size);
119 121
120 pci_epc_unmap_addr(epc, dst_phys_addr); 122 pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
121 123
122err_dst_addr: 124err_dst_addr:
123 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 125 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
124 126
125err_src_map_addr: 127err_src_map_addr:
126 pci_epc_unmap_addr(epc, src_phys_addr); 128 pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
127 129
128err_src_addr: 130err_src_addr:
129 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); 131 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -153,7 +155,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
153 goto err; 155 goto err;
154 } 156 }
155 157
156 ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size); 158 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
159 reg->size);
157 if (ret) { 160 if (ret) {
158 dev_err(dev, "failed to map address\n"); 161 dev_err(dev, "failed to map address\n");
159 reg->status = STATUS_SRC_ADDR_INVALID; 162 reg->status = STATUS_SRC_ADDR_INVALID;
@@ -175,7 +178,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
175 kfree(buf); 178 kfree(buf);
176 179
177err_map_addr: 180err_map_addr:
178 pci_epc_unmap_addr(epc, phys_addr); 181 pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
179 182
180err_addr: 183err_addr:
181 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); 184 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -204,7 +207,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
204 goto err; 207 goto err;
205 } 208 }
206 209
207 ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size); 210 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
211 reg->size);
208 if (ret) { 212 if (ret) {
209 dev_err(dev, "failed to map address\n"); 213 dev_err(dev, "failed to map address\n");
210 reg->status = STATUS_DST_ADDR_INVALID; 214 reg->status = STATUS_DST_ADDR_INVALID;
@@ -231,7 +235,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
231 kfree(buf); 235 kfree(buf);
232 236
233err_map_addr: 237err_map_addr:
234 pci_epc_unmap_addr(epc, phys_addr); 238 pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
235 239
236err_addr: 240err_addr:
237 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); 241 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -249,11 +253,11 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
249 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 253 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
250 254
251 reg->status |= STATUS_IRQ_RAISED; 255 reg->status |= STATUS_IRQ_RAISED;
252 msi_count = pci_epc_get_msi(epc); 256 msi_count = pci_epc_get_msi(epc, epf->func_no);
253 if (irq > msi_count || msi_count <= 0) 257 if (irq > msi_count || msi_count <= 0)
254 pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); 258 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
255 else 259 else
256 pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq); 260 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
257} 261}
258 262
259static void pci_epf_test_cmd_handler(struct work_struct *work) 263static void pci_epf_test_cmd_handler(struct work_struct *work)
@@ -280,7 +284,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
280 284
281 if (command & COMMAND_RAISE_LEGACY_IRQ) { 285 if (command & COMMAND_RAISE_LEGACY_IRQ) {
282 reg->status = STATUS_IRQ_RAISED; 286 reg->status = STATUS_IRQ_RAISED;
283 pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); 287 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
284 goto reset_handler; 288 goto reset_handler;
285 } 289 }
286 290
@@ -315,11 +319,11 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
315 } 319 }
316 320
317 if (command & COMMAND_RAISE_MSI_IRQ) { 321 if (command & COMMAND_RAISE_MSI_IRQ) {
318 msi_count = pci_epc_get_msi(epc); 322 msi_count = pci_epc_get_msi(epc, epf->func_no);
319 if (irq > msi_count || msi_count <= 0) 323 if (irq > msi_count || msi_count <= 0)
320 goto reset_handler; 324 goto reset_handler;
321 reg->status = STATUS_IRQ_RAISED; 325 reg->status = STATUS_IRQ_RAISED;
322 pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq); 326 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
323 goto reset_handler; 327 goto reset_handler;
324 } 328 }
325 329
@@ -347,7 +351,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
347 for (bar = BAR_0; bar <= BAR_5; bar++) { 351 for (bar = BAR_0; bar <= BAR_5; bar++) {
348 if (epf_test->reg[bar]) { 352 if (epf_test->reg[bar]) {
349 pci_epf_free_space(epf, epf_test->reg[bar], bar); 353 pci_epf_free_space(epf, epf_test->reg[bar], bar);
350 pci_epc_clear_bar(epc, bar); 354 pci_epc_clear_bar(epc, epf->func_no, bar);
351 } 355 }
352 } 356 }
353} 357}
@@ -369,7 +373,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
369 373
370 for (bar = BAR_0; bar <= BAR_5; bar++) { 374 for (bar = BAR_0; bar <= BAR_5; bar++) {
371 epf_bar = &epf->bar[bar]; 375 epf_bar = &epf->bar[bar];
372 ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr, 376 ret = pci_epc_set_bar(epc, epf->func_no, bar,
377 epf_bar->phys_addr,
373 epf_bar->size, flags); 378 epf_bar->size, flags);
374 if (ret) { 379 if (ret) {
375 pci_epf_free_space(epf, epf_test->reg[bar], bar); 380 pci_epf_free_space(epf, epf_test->reg[bar], bar);
@@ -422,7 +427,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
422 if (WARN_ON_ONCE(!epc)) 427 if (WARN_ON_ONCE(!epc))
423 return -EINVAL; 428 return -EINVAL;
424 429
425 ret = pci_epc_write_header(epc, header); 430 ret = pci_epc_write_header(epc, epf->func_no, header);
426 if (ret) { 431 if (ret) {
427 dev_err(dev, "configuration header write failed\n"); 432 dev_err(dev, "configuration header write failed\n");
428 return ret; 433 return ret;
@@ -436,7 +441,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
436 if (ret) 441 if (ret)
437 return ret; 442 return ret;
438 443
439 ret = pci_epc_set_msi(epc, epf->msi_interrupts); 444 ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
440 if (ret) 445 if (ret)
441 return ret; 446 return ret;
442 447
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 9029608c8b1b..018ea3433cb5 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -7,18 +7,22 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/idr.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11 12
12#include <linux/pci-epc.h> 13#include <linux/pci-epc.h>
13#include <linux/pci-epf.h> 14#include <linux/pci-epf.h>
14#include <linux/pci-ep-cfs.h> 15#include <linux/pci-ep-cfs.h>
15 16
17static DEFINE_IDR(functions_idr);
18static DEFINE_MUTEX(functions_mutex);
16static struct config_group *functions_group; 19static struct config_group *functions_group;
17static struct config_group *controllers_group; 20static struct config_group *controllers_group;
18 21
19struct pci_epf_group { 22struct pci_epf_group {
20 struct config_group group; 23 struct config_group group;
21 struct pci_epf *epf; 24 struct pci_epf *epf;
25 int index;
22}; 26};
23 27
24struct pci_epc_group { 28struct pci_epc_group {
@@ -86,22 +90,23 @@ static int pci_epc_epf_link(struct config_item *epc_item,
86{ 90{
87 int ret; 91 int ret;
88 u32 func_no = 0; 92 u32 func_no = 0;
89 struct pci_epc *epc;
90 struct pci_epf *epf;
91 struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); 93 struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
92 struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); 94 struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
93 95 struct pci_epc *epc = epc_group->epc;
94 epc = epc_group->epc; 96 struct pci_epf *epf = epf_group->epf;
95 epf = epf_group->epf;
96 ret = pci_epc_add_epf(epc, epf);
97 if (ret)
98 goto err_add_epf;
99 97
100 func_no = find_first_zero_bit(&epc_group->function_num_map, 98 func_no = find_first_zero_bit(&epc_group->function_num_map,
101 sizeof(epc_group->function_num_map)); 99 BITS_PER_LONG);
100 if (func_no >= BITS_PER_LONG)
101 return -EINVAL;
102
102 set_bit(func_no, &epc_group->function_num_map); 103 set_bit(func_no, &epc_group->function_num_map);
103 epf->func_no = func_no; 104 epf->func_no = func_no;
104 105
106 ret = pci_epc_add_epf(epc, epf);
107 if (ret)
108 goto err_add_epf;
109
105 ret = pci_epf_bind(epf); 110 ret = pci_epf_bind(epf);
106 if (ret) 111 if (ret)
107 goto err_epf_bind; 112 goto err_epf_bind;
@@ -342,6 +347,9 @@ static void pci_epf_release(struct config_item *item)
342{ 347{
343 struct pci_epf_group *epf_group = to_pci_epf_group(item); 348 struct pci_epf_group *epf_group = to_pci_epf_group(item);
344 349
350 mutex_lock(&functions_mutex);
351 idr_remove(&functions_idr, epf_group->index);
352 mutex_unlock(&functions_mutex);
345 pci_epf_destroy(epf_group->epf); 353 pci_epf_destroy(epf_group->epf);
346 kfree(epf_group); 354 kfree(epf_group);
347} 355}
@@ -361,22 +369,57 @@ static struct config_group *pci_epf_make(struct config_group *group,
361{ 369{
362 struct pci_epf_group *epf_group; 370 struct pci_epf_group *epf_group;
363 struct pci_epf *epf; 371 struct pci_epf *epf;
372 char *epf_name;
373 int index, err;
364 374
365 epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL); 375 epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
366 if (!epf_group) 376 if (!epf_group)
367 return ERR_PTR(-ENOMEM); 377 return ERR_PTR(-ENOMEM);
368 378
379 mutex_lock(&functions_mutex);
380 index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
381 mutex_unlock(&functions_mutex);
382 if (index < 0) {
383 err = index;
384 goto free_group;
385 }
386
387 epf_group->index = index;
388
369 config_group_init_type_name(&epf_group->group, name, &pci_epf_type); 389 config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
370 390
371 epf = pci_epf_create(group->cg_item.ci_name); 391 epf_name = kasprintf(GFP_KERNEL, "%s.%d",
392 group->cg_item.ci_name, epf_group->index);
393 if (!epf_name) {
394 err = -ENOMEM;
395 goto remove_idr;
396 }
397
398 epf = pci_epf_create(epf_name);
372 if (IS_ERR(epf)) { 399 if (IS_ERR(epf)) {
373 pr_err("failed to create endpoint function device\n"); 400 pr_err("failed to create endpoint function device\n");
374 return ERR_PTR(-EINVAL); 401 err = -EINVAL;
402 goto free_name;
375 } 403 }
376 404
377 epf_group->epf = epf; 405 epf_group->epf = epf;
378 406
407 kfree(epf_name);
408
379 return &epf_group->group; 409 return &epf_group->group;
410
411free_name:
412 kfree(epf_name);
413
414remove_idr:
415 mutex_lock(&functions_mutex);
416 idr_remove(&functions_idr, epf_group->index);
417 mutex_unlock(&functions_mutex);
418
419free_group:
420 kfree(epf_group);
421
422 return ERR_PTR(err);
380} 423}
381 424
382static void pci_epf_drop(struct config_group *group, struct config_item *item) 425static void pci_epf_drop(struct config_group *group, struct config_item *item)
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 71b72c63fddb..e245bba0ab53 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/slab.h> 10#include <linux/slab.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/of_device.h> 12#include <linux/of_device.h>
@@ -131,25 +130,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
131/** 130/**
132 * pci_epc_raise_irq() - interrupt the host system 131 * pci_epc_raise_irq() - interrupt the host system
133 * @epc: the EPC device which has to interrupt the host 132 * @epc: the EPC device which has to interrupt the host
133 * @func_no: the endpoint function number in the EPC device
134 * @type: specify the type of interrupt; legacy or MSI 134 * @type: specify the type of interrupt; legacy or MSI
135 * @interrupt_num: the MSI interrupt number 135 * @interrupt_num: the MSI interrupt number
136 * 136 *
137 * Invoke to raise an MSI or legacy interrupt 137 * Invoke to raise an MSI or legacy interrupt
138 */ 138 */
139int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, 139int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
140 u8 interrupt_num) 140 enum pci_epc_irq_type type, u8 interrupt_num)
141{ 141{
142 int ret; 142 int ret;
143 unsigned long flags; 143 unsigned long flags;
144 144
145 if (IS_ERR(epc)) 145 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
146 return -EINVAL; 146 return -EINVAL;
147 147
148 if (!epc->ops->raise_irq) 148 if (!epc->ops->raise_irq)
149 return 0; 149 return 0;
150 150
151 spin_lock_irqsave(&epc->lock, flags); 151 spin_lock_irqsave(&epc->lock, flags);
152 ret = epc->ops->raise_irq(epc, type, interrupt_num); 152 ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
153 spin_unlock_irqrestore(&epc->lock, flags); 153 spin_unlock_irqrestore(&epc->lock, flags);
154 154
155 return ret; 155 return ret;
@@ -159,22 +159,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
159/** 159/**
160 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated 160 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
161 * @epc: the EPC device to which MSI interrupts was requested 161 * @epc: the EPC device to which MSI interrupts was requested
162 * @func_no: the endpoint function number in the EPC device
162 * 163 *
163 * Invoke to get the number of MSI interrupts allocated by the RC 164 * Invoke to get the number of MSI interrupts allocated by the RC
164 */ 165 */
165int pci_epc_get_msi(struct pci_epc *epc) 166int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
166{ 167{
167 int interrupt; 168 int interrupt;
168 unsigned long flags; 169 unsigned long flags;
169 170
170 if (IS_ERR(epc)) 171 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
171 return 0; 172 return 0;
172 173
173 if (!epc->ops->get_msi) 174 if (!epc->ops->get_msi)
174 return 0; 175 return 0;
175 176
176 spin_lock_irqsave(&epc->lock, flags); 177 spin_lock_irqsave(&epc->lock, flags);
177 interrupt = epc->ops->get_msi(epc); 178 interrupt = epc->ops->get_msi(epc, func_no);
178 spin_unlock_irqrestore(&epc->lock, flags); 179 spin_unlock_irqrestore(&epc->lock, flags);
179 180
180 if (interrupt < 0) 181 if (interrupt < 0)
@@ -189,17 +190,18 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
189/** 190/**
190 * pci_epc_set_msi() - set the number of MSI interrupt numbers required 191 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
191 * @epc: the EPC device on which MSI has to be configured 192 * @epc: the EPC device on which MSI has to be configured
193 * @func_no: the endpoint function number in the EPC device
192 * @interrupts: number of MSI interrupts required by the EPF 194 * @interrupts: number of MSI interrupts required by the EPF
193 * 195 *
194 * Invoke to set the required number of MSI interrupts. 196 * Invoke to set the required number of MSI interrupts.
195 */ 197 */
196int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts) 198int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
197{ 199{
198 int ret; 200 int ret;
199 u8 encode_int; 201 u8 encode_int;
200 unsigned long flags; 202 unsigned long flags;
201 203
202 if (IS_ERR(epc)) 204 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
203 return -EINVAL; 205 return -EINVAL;
204 206
205 if (!epc->ops->set_msi) 207 if (!epc->ops->set_msi)
@@ -208,7 +210,7 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
208 encode_int = order_base_2(interrupts); 210 encode_int = order_base_2(interrupts);
209 211
210 spin_lock_irqsave(&epc->lock, flags); 212 spin_lock_irqsave(&epc->lock, flags);
211 ret = epc->ops->set_msi(epc, encode_int); 213 ret = epc->ops->set_msi(epc, func_no, encode_int);
212 spin_unlock_irqrestore(&epc->lock, flags); 214 spin_unlock_irqrestore(&epc->lock, flags);
213 215
214 return ret; 216 return ret;
@@ -218,22 +220,24 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
218/** 220/**
219 * pci_epc_unmap_addr() - unmap CPU address from PCI address 221 * pci_epc_unmap_addr() - unmap CPU address from PCI address
220 * @epc: the EPC device on which address is allocated 222 * @epc: the EPC device on which address is allocated
223 * @func_no: the endpoint function number in the EPC device
221 * @phys_addr: physical address of the local system 224 * @phys_addr: physical address of the local system
222 * 225 *
223 * Invoke to unmap the CPU address from PCI address. 226 * Invoke to unmap the CPU address from PCI address.
224 */ 227 */
225void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr) 228void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
229 phys_addr_t phys_addr)
226{ 230{
227 unsigned long flags; 231 unsigned long flags;
228 232
229 if (IS_ERR(epc)) 233 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
230 return; 234 return;
231 235
232 if (!epc->ops->unmap_addr) 236 if (!epc->ops->unmap_addr)
233 return; 237 return;
234 238
235 spin_lock_irqsave(&epc->lock, flags); 239 spin_lock_irqsave(&epc->lock, flags);
236 epc->ops->unmap_addr(epc, phys_addr); 240 epc->ops->unmap_addr(epc, func_no, phys_addr);
237 spin_unlock_irqrestore(&epc->lock, flags); 241 spin_unlock_irqrestore(&epc->lock, flags);
238} 242}
239EXPORT_SYMBOL_GPL(pci_epc_unmap_addr); 243EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -241,26 +245,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
241/** 245/**
242 * pci_epc_map_addr() - map CPU address to PCI address 246 * pci_epc_map_addr() - map CPU address to PCI address
243 * @epc: the EPC device on which address is allocated 247 * @epc: the EPC device on which address is allocated
248 * @func_no: the endpoint function number in the EPC device
244 * @phys_addr: physical address of the local system 249 * @phys_addr: physical address of the local system
245 * @pci_addr: PCI address to which the physical address should be mapped 250 * @pci_addr: PCI address to which the physical address should be mapped
246 * @size: the size of the allocation 251 * @size: the size of the allocation
247 * 252 *
248 * Invoke to map CPU address with PCI address. 253 * Invoke to map CPU address with PCI address.
249 */ 254 */
250int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, 255int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
251 u64 pci_addr, size_t size) 256 phys_addr_t phys_addr, u64 pci_addr, size_t size)
252{ 257{
253 int ret; 258 int ret;
254 unsigned long flags; 259 unsigned long flags;
255 260
256 if (IS_ERR(epc)) 261 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
257 return -EINVAL; 262 return -EINVAL;
258 263
259 if (!epc->ops->map_addr) 264 if (!epc->ops->map_addr)
260 return 0; 265 return 0;
261 266
262 spin_lock_irqsave(&epc->lock, flags); 267 spin_lock_irqsave(&epc->lock, flags);
263 ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size); 268 ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
264 spin_unlock_irqrestore(&epc->lock, flags); 269 spin_unlock_irqrestore(&epc->lock, flags);
265 270
266 return ret; 271 return ret;
@@ -270,22 +275,23 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
270/** 275/**
271 * pci_epc_clear_bar() - reset the BAR 276 * pci_epc_clear_bar() - reset the BAR
272 * @epc: the EPC device for which the BAR has to be cleared 277 * @epc: the EPC device for which the BAR has to be cleared
278 * @func_no: the endpoint function number in the EPC device
273 * @bar: the BAR number that has to be reset 279 * @bar: the BAR number that has to be reset
274 * 280 *
275 * Invoke to reset the BAR of the endpoint device. 281 * Invoke to reset the BAR of the endpoint device.
276 */ 282 */
277void pci_epc_clear_bar(struct pci_epc *epc, int bar) 283void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar)
278{ 284{
279 unsigned long flags; 285 unsigned long flags;
280 286
281 if (IS_ERR(epc)) 287 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
282 return; 288 return;
283 289
284 if (!epc->ops->clear_bar) 290 if (!epc->ops->clear_bar)
285 return; 291 return;
286 292
287 spin_lock_irqsave(&epc->lock, flags); 293 spin_lock_irqsave(&epc->lock, flags);
288 epc->ops->clear_bar(epc, bar); 294 epc->ops->clear_bar(epc, func_no, bar);
289 spin_unlock_irqrestore(&epc->lock, flags); 295 spin_unlock_irqrestore(&epc->lock, flags);
290} 296}
291EXPORT_SYMBOL_GPL(pci_epc_clear_bar); 297EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -293,26 +299,27 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
293/** 299/**
294 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space 300 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
295 * @epc: the EPC device on which BAR has to be configured 301 * @epc: the EPC device on which BAR has to be configured
302 * @func_no: the endpoint function number in the EPC device
296 * @bar: the BAR number that has to be configured 303 * @bar: the BAR number that has to be configured
297 * @size: the size of the addr space 304 * @size: the size of the addr space
298 * @flags: specify memory allocation/io allocation/32bit address/64 bit address 305 * @flags: specify memory allocation/io allocation/32bit address/64 bit address
299 * 306 *
300 * Invoke to configure the BAR of the endpoint device. 307 * Invoke to configure the BAR of the endpoint device.
301 */ 308 */
302int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, 309int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, enum pci_barno bar,
303 dma_addr_t bar_phys, size_t size, int flags) 310 dma_addr_t bar_phys, size_t size, int flags)
304{ 311{
305 int ret; 312 int ret;
306 unsigned long irq_flags; 313 unsigned long irq_flags;
307 314
308 if (IS_ERR(epc)) 315 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
309 return -EINVAL; 316 return -EINVAL;
310 317
311 if (!epc->ops->set_bar) 318 if (!epc->ops->set_bar)
312 return 0; 319 return 0;
313 320
314 spin_lock_irqsave(&epc->lock, irq_flags); 321 spin_lock_irqsave(&epc->lock, irq_flags);
315 ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags); 322 ret = epc->ops->set_bar(epc, func_no, bar, bar_phys, size, flags);
316 spin_unlock_irqrestore(&epc->lock, irq_flags); 323 spin_unlock_irqrestore(&epc->lock, irq_flags);
317 324
318 return ret; 325 return ret;
@@ -322,6 +329,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
322/** 329/**
323 * pci_epc_write_header() - write standard configuration header 330 * pci_epc_write_header() - write standard configuration header
324 * @epc: the EPC device to which the configuration header should be written 331 * @epc: the EPC device to which the configuration header should be written
332 * @func_no: the endpoint function number in the EPC device
325 * @header: standard configuration header fields 333 * @header: standard configuration header fields
326 * 334 *
327 * Invoke to write the configuration header to the endpoint controller. Every 335 * Invoke to write the configuration header to the endpoint controller. Every
@@ -329,19 +337,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
329 * configuration header would be written. The callback function should write 337 * configuration header would be written. The callback function should write
330 * the header fields to this dedicated location. 338 * the header fields to this dedicated location.
331 */ 339 */
332int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header) 340int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
341 struct pci_epf_header *header)
333{ 342{
334 int ret; 343 int ret;
335 unsigned long flags; 344 unsigned long flags;
336 345
337 if (IS_ERR(epc)) 346 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
338 return -EINVAL; 347 return -EINVAL;
339 348
340 if (!epc->ops->write_header) 349 if (!epc->ops->write_header)
341 return 0; 350 return 0;
342 351
343 spin_lock_irqsave(&epc->lock, flags); 352 spin_lock_irqsave(&epc->lock, flags);
344 ret = epc->ops->write_header(epc, header); 353 ret = epc->ops->write_header(epc, func_no, header);
345 spin_unlock_irqrestore(&epc->lock, flags); 354 spin_unlock_irqrestore(&epc->lock, flags);
346 355
347 return ret; 356 return ret;
@@ -360,7 +369,6 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
360int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) 369int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
361{ 370{
362 unsigned long flags; 371 unsigned long flags;
363 struct device *dev = epc->dev.parent;
364 372
365 if (epf->epc) 373 if (epf->epc)
366 return -EBUSY; 374 return -EBUSY;
@@ -372,12 +380,6 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
372 return -EINVAL; 380 return -EINVAL;
373 381
374 epf->epc = epc; 382 epf->epc = epc;
375 if (dev->of_node) {
376 of_dma_configure(&epf->dev, dev->of_node);
377 } else {
378 dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask);
379 epf->dev.dma_mask = epc->dev.dma_mask;
380 }
381 383
382 spin_lock_irqsave(&epc->lock, flags); 384 spin_lock_irqsave(&epc->lock, flags);
383 list_add_tail(&epf->list, &epc->pci_epf); 385 list_add_tail(&epf->list, &epc->pci_epf);
@@ -492,9 +494,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
492 INIT_LIST_HEAD(&epc->pci_epf); 494 INIT_LIST_HEAD(&epc->pci_epf);
493 495
494 device_initialize(&epc->dev); 496 device_initialize(&epc->dev);
495 dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask);
496 epc->dev.class = pci_epc_class; 497 epc->dev.class = pci_epc_class;
497 epc->dev.dma_mask = dev->dma_mask;
498 epc->dev.parent = dev; 498 epc->dev.parent = dev;
499 epc->ops = ops; 499 epc->ops = ops;
500 500
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 9f282ea632d2..766ce1dca2ec 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
88 */ 88 */
89void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar) 89void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
90{ 90{
91 struct device *dev = &epf->dev; 91 struct device *dev = epf->epc->dev.parent;
92 92
93 if (!addr) 93 if (!addr)
94 return; 94 return;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
111void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) 111void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
112{ 112{
113 void *space; 113 void *space;
114 struct device *dev = &epf->dev; 114 struct device *dev = epf->epc->dev.parent;
115 dma_addr_t phys_addr; 115 dma_addr_t phys_addr;
116 116
117 if (size < 128) 117 if (size < 128)
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 34ec1d88f961..3b1059190867 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_VMD) += vmd.o
34# ARM64 and use internal ifdefs to only build the pieces we need 34# ARM64 and use internal ifdefs to only build the pieces we need
35# depending on whether ACPI, the DT driver, or both are enabled. 35# depending on whether ACPI, the DT driver, or both are enabled.
36 36
37ifdef CONFIG_PCI
37obj-$(CONFIG_ARM64) += pci-thunder-ecam.o 38obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
38obj-$(CONFIG_ARM64) += pci-thunder-pem.o 39obj-$(CONFIG_ARM64) += pci-thunder-pem.o
39obj-$(CONFIG_ARM64) += pci-xgene.o 40obj-$(CONFIG_ARM64) += pci-xgene.o
41endif
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 31f2c231e893..5d028f53fdcd 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -13,50 +13,6 @@
13#include <linux/pci-ecam.h> 13#include <linux/pci-ecam.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15 15
16static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
17 struct list_head *resources, struct resource **bus_range)
18{
19 int err, res_valid = 0;
20 struct device_node *np = dev->of_node;
21 resource_size_t iobase;
22 struct resource_entry *win, *tmp;
23
24 err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
25 if (err)
26 return err;
27
28 err = devm_request_pci_bus_resources(dev, resources);
29 if (err)
30 return err;
31
32 resource_list_for_each_entry_safe(win, tmp, resources) {
33 struct resource *res = win->res;
34
35 switch (resource_type(res)) {
36 case IORESOURCE_IO:
37 err = pci_remap_iospace(res, iobase);
38 if (err) {
39 dev_warn(dev, "error %d: failed to map resource %pR\n",
40 err, res);
41 resource_list_destroy_entry(win);
42 }
43 break;
44 case IORESOURCE_MEM:
45 res_valid |= !(res->flags & IORESOURCE_PREFETCH);
46 break;
47 case IORESOURCE_BUS:
48 *bus_range = res;
49 break;
50 }
51 }
52
53 if (res_valid)
54 return 0;
55
56 dev_err(dev, "non-prefetchable memory resource required\n");
57 return -EINVAL;
58}
59
60static void gen_pci_unmap_cfg(void *ptr) 16static void gen_pci_unmap_cfg(void *ptr)
61{ 17{
62 pci_ecam_free((struct pci_config_window *)ptr); 18 pci_ecam_free((struct pci_config_window *)ptr);
@@ -71,9 +27,9 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
71 struct pci_config_window *cfg; 27 struct pci_config_window *cfg;
72 28
73 /* Parse our PCI ranges and request their resources */ 29 /* Parse our PCI ranges and request their resources */
74 err = gen_pci_parse_request_of_pci_ranges(dev, resources, &bus_range); 30 err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
75 if (err) 31 if (err)
76 goto err_out; 32 return ERR_PTR(err);
77 33
78 err = of_address_to_resource(dev->of_node, 0, &cfgres); 34 err = of_address_to_resource(dev->of_node, 0, &cfgres);
79 if (err) { 35 if (err) {
@@ -105,7 +61,6 @@ int pci_host_common_probe(struct platform_device *pdev,
105 const char *type; 61 const char *type;
106 struct device *dev = &pdev->dev; 62 struct device *dev = &pdev->dev;
107 struct device_node *np = dev->of_node; 63 struct device_node *np = dev->of_node;
108 struct pci_bus *bus, *child;
109 struct pci_host_bridge *bridge; 64 struct pci_host_bridge *bridge;
110 struct pci_config_window *cfg; 65 struct pci_config_window *cfg;
111 struct list_head resources; 66 struct list_head resources;
@@ -124,14 +79,13 @@ int pci_host_common_probe(struct platform_device *pdev,
124 of_pci_check_probe_only(); 79 of_pci_check_probe_only();
125 80
126 /* Parse and map our Configuration Space windows */ 81 /* Parse and map our Configuration Space windows */
127 INIT_LIST_HEAD(&resources);
128 cfg = gen_pci_init(dev, &resources, ops); 82 cfg = gen_pci_init(dev, &resources, ops);
129 if (IS_ERR(cfg)) 83 if (IS_ERR(cfg))
130 return PTR_ERR(cfg); 84 return PTR_ERR(cfg);
131 85
132 /* Do not reassign resources if probe only */ 86 /* Do not reassign resources if probe only */
133 if (!pci_has_flag(PCI_PROBE_ONLY)) 87 if (!pci_has_flag(PCI_PROBE_ONLY))
134 pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); 88 pci_add_flags(PCI_REASSIGN_ALL_BUS);
135 89
136 list_splice_init(&resources, &bridge->windows); 90 list_splice_init(&resources, &bridge->windows);
137 bridge->dev.parent = dev; 91 bridge->dev.parent = dev;
@@ -141,29 +95,11 @@ int pci_host_common_probe(struct platform_device *pdev,
141 bridge->map_irq = of_irq_parse_and_map_pci; 95 bridge->map_irq = of_irq_parse_and_map_pci;
142 bridge->swizzle_irq = pci_common_swizzle; 96 bridge->swizzle_irq = pci_common_swizzle;
143 97
144 ret = pci_scan_root_bus_bridge(bridge); 98 ret = pci_host_probe(bridge);
145 if (ret < 0) { 99 if (ret < 0) {
146 dev_err(dev, "Scanning root bridge failed"); 100 pci_free_resource_list(&resources);
147 return ret; 101 return ret;
148 } 102 }
149 103
150 bus = bridge->bus;
151
152 /*
153 * We insert PCI resources into the iomem_resource and
154 * ioport_resource trees in either pci_bus_claim_resources()
155 * or pci_bus_assign_resources().
156 */
157 if (pci_has_flag(PCI_PROBE_ONLY)) {
158 pci_bus_claim_resources(bus);
159 } else {
160 pci_bus_size_bridges(bus);
161 pci_bus_assign_resources(bus);
162
163 list_for_each_entry(child, &bus->children, node)
164 pcie_bus_configure_settings(child);
165 }
166
167 pci_bus_add_devices(bus);
168 return 0; 104 return 0;
169} 105}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 8ff28057ec02..dd9b3bcc41c3 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -256,11 +256,10 @@ struct tegra_pcie {
256 256
257 void __iomem *pads; 257 void __iomem *pads;
258 void __iomem *afi; 258 void __iomem *afi;
259 void __iomem *cfg;
259 int irq; 260 int irq;
260 261
261 struct list_head buses; 262 struct resource cs;
262 struct resource *cs;
263
264 struct resource io; 263 struct resource io;
265 struct resource pio; 264 struct resource pio;
266 struct resource mem; 265 struct resource mem;
@@ -309,7 +308,6 @@ struct tegra_pcie_port {
309}; 308};
310 309
311struct tegra_pcie_bus { 310struct tegra_pcie_bus {
312 struct vm_struct *area;
313 struct list_head list; 311 struct list_head list;
314 unsigned int nr; 312 unsigned int nr;
315}; 313};
@@ -349,109 +347,26 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
349 * 347 *
350 * Mapping the whole extended configuration space would require 256 MiB of 348 * Mapping the whole extended configuration space would require 256 MiB of
351 * virtual address space, only a small part of which will actually be used. 349 * virtual address space, only a small part of which will actually be used.
352 * To work around this, a 1 MiB of virtual addresses are allocated per bus
353 * when the bus is first accessed. When the physical range is mapped, the
354 * the bus number bits are hidden so that the extended register number bits
355 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
356 *
357 * [19:16] extended register number
358 * [15:11] device number
359 * [10: 8] function number
360 * [ 7: 0] register number
361 * 350 *
362 * This is achieved by stitching together 16 chunks of 64 KiB of physical 351 * To work around this, a 4 KiB region is used to generate the required
363 * address space via the MMU. 352 * configuration transaction with relevant B:D:F and register offset values.
353 * This is achieved by dynamically programming base address and size of
354 * AFI_AXI_BAR used for end point config space mapping to make sure that the
355 * address (access to which generates correct config transaction) falls in
356 * this 4 KiB region.
364 */ 357 */
365static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where) 358static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
359 unsigned int where)
366{ 360{
367 return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) | 361 return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
368 (PCI_FUNC(devfn) << 8) | (where & 0xfc); 362 (PCI_FUNC(devfn) << 8) | (where & 0xff);
369}
370
371static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
372 unsigned int busnr)
373{
374 struct device *dev = pcie->dev;
375 pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
376 phys_addr_t cs = pcie->cs->start;
377 struct tegra_pcie_bus *bus;
378 unsigned int i;
379 int err;
380
381 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
382 if (!bus)
383 return ERR_PTR(-ENOMEM);
384
385 INIT_LIST_HEAD(&bus->list);
386 bus->nr = busnr;
387
388 /* allocate 1 MiB of virtual addresses */
389 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
390 if (!bus->area) {
391 err = -ENOMEM;
392 goto free;
393 }
394
395 /* map each of the 16 chunks of 64 KiB each */
396 for (i = 0; i < 16; i++) {
397 unsigned long virt = (unsigned long)bus->area->addr +
398 i * SZ_64K;
399 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
400
401 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
402 if (err < 0) {
403 dev_err(dev, "ioremap_page_range() failed: %d\n", err);
404 goto unmap;
405 }
406 }
407
408 return bus;
409
410unmap:
411 vunmap(bus->area->addr);
412free:
413 kfree(bus);
414 return ERR_PTR(err);
415}
416
417static int tegra_pcie_add_bus(struct pci_bus *bus)
418{
419 struct pci_host_bridge *host = pci_find_host_bridge(bus);
420 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
421 struct tegra_pcie_bus *b;
422
423 b = tegra_pcie_bus_alloc(pcie, bus->number);
424 if (IS_ERR(b))
425 return PTR_ERR(b);
426
427 list_add_tail(&b->list, &pcie->buses);
428
429 return 0;
430}
431
432static void tegra_pcie_remove_bus(struct pci_bus *child)
433{
434 struct pci_host_bridge *host = pci_find_host_bridge(child);
435 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
436 struct tegra_pcie_bus *bus, *tmp;
437
438 list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
439 if (bus->nr == child->number) {
440 vunmap(bus->area->addr);
441 list_del(&bus->list);
442 kfree(bus);
443 break;
444 }
445 }
446} 363}
447 364
448static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, 365static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
449 unsigned int devfn, 366 unsigned int devfn,
450 int where) 367 int where)
451{ 368{
452 struct pci_host_bridge *host = pci_find_host_bridge(bus); 369 struct tegra_pcie *pcie = bus->sysdata;
453 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
454 struct device *dev = pcie->dev;
455 void __iomem *addr = NULL; 370 void __iomem *addr = NULL;
456 371
457 if (bus->number == 0) { 372 if (bus->number == 0) {
@@ -465,19 +380,17 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
465 } 380 }
466 } 381 }
467 } else { 382 } else {
468 struct tegra_pcie_bus *b; 383 unsigned int offset;
384 u32 base;
469 385
470 list_for_each_entry(b, &pcie->buses, list) 386 offset = tegra_pcie_conf_offset(bus->number, devfn, where);
471 if (b->nr == bus->number)
472 addr = (void __iomem *)b->area->addr;
473 387
474 if (!addr) { 388 /* move 4 KiB window to offset within the FPCI region */
475 dev_err(dev, "failed to map cfg. space for bus %u\n", 389 base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
476 bus->number); 390 afi_writel(pcie, base, AFI_FPCI_BAR0);
477 return NULL;
478 }
479 391
480 addr += tegra_pcie_conf_offset(devfn, where); 392 /* move to correct offset within the 4 KiB page */
393 addr = pcie->cfg + (offset & (SZ_4K - 1));
481 } 394 }
482 395
483 return addr; 396 return addr;
@@ -504,8 +417,6 @@ static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
504} 417}
505 418
506static struct pci_ops tegra_pcie_ops = { 419static struct pci_ops tegra_pcie_ops = {
507 .add_bus = tegra_pcie_add_bus,
508 .remove_bus = tegra_pcie_remove_bus,
509 .map_bus = tegra_pcie_map_bus, 420 .map_bus = tegra_pcie_map_bus,
510 .read = tegra_pcie_config_read, 421 .read = tegra_pcie_config_read,
511 .write = tegra_pcie_config_write, 422 .write = tegra_pcie_config_write,
@@ -648,8 +559,7 @@ static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
648 559
649static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) 560static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
650{ 561{
651 struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus); 562 struct tegra_pcie *pcie = pdev->bus->sysdata;
652 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
653 int irq; 563 int irq;
654 564
655 tegra_cpuidle_pcie_irqs_in_use(); 565 tegra_cpuidle_pcie_irqs_in_use();
@@ -730,12 +640,9 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
730 u32 fpci_bar, size, axi_address; 640 u32 fpci_bar, size, axi_address;
731 641
732 /* Bar 0: type 1 extended configuration space */ 642 /* Bar 0: type 1 extended configuration space */
733 fpci_bar = 0xfe100000; 643 size = resource_size(&pcie->cs);
734 size = resource_size(pcie->cs); 644 afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
735 axi_address = pcie->cs->start;
736 afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
737 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); 645 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
738 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
739 646
740 /* Bar 1: downstream IO bar */ 647 /* Bar 1: downstream IO bar */
741 fpci_bar = 0xfdfc0000; 648 fpci_bar = 0xfdfc0000;
@@ -1340,10 +1247,14 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1340 goto poweroff; 1247 goto poweroff;
1341 } 1248 }
1342 1249
1343 pcie->cs = devm_request_mem_region(dev, res->start, 1250 pcie->cs = *res;
1344 resource_size(res), res->name); 1251
1345 if (!pcie->cs) { 1252 /* constrain configuration space to 4 KiB */
1346 err = -EADDRNOTAVAIL; 1253 pcie->cs.end = pcie->cs.start + SZ_4K - 1;
1254
1255 pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
1256 if (IS_ERR(pcie->cfg)) {
1257 err = PTR_ERR(pcie->cfg);
1347 goto poweroff; 1258 goto poweroff;
1348 } 1259 }
1349 1260
@@ -2332,9 +2243,9 @@ static int tegra_pcie_probe(struct platform_device *pdev)
2332 return -ENOMEM; 2243 return -ENOMEM;
2333 2244
2334 pcie = pci_host_bridge_priv(host); 2245 pcie = pci_host_bridge_priv(host);
2246 host->sysdata = pcie;
2335 2247
2336 pcie->soc = of_device_get_match_data(dev); 2248 pcie->soc = of_device_get_match_data(dev);
2337 INIT_LIST_HEAD(&pcie->buses);
2338 INIT_LIST_HEAD(&pcie->ports); 2249 INIT_LIST_HEAD(&pcie->ports);
2339 pcie->dev = dev; 2250 pcie->dev = dev;
2340 2251
@@ -2369,7 +2280,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
2369 2280
2370 tegra_pcie_enable_ports(pcie); 2281 tegra_pcie_enable_ports(pcie);
2371 2282
2372 pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
2373 host->busnr = pcie->busn.start; 2283 host->busnr = pcie->busn.start;
2374 host->dev.parent = &pdev->dev; 2284 host->dev.parent = &pdev->dev;
2375 host->ops = &tegra_pcie_ops; 2285 host->ops = &tegra_pcie_ops;
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index b5b59d058c1b..5b3876f5312b 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -194,7 +194,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
194 writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); 194 writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
195 195
196 pci_add_flags(PCI_ENABLE_PROC_DOMAINS); 196 pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
197 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); 197 pci_add_flags(PCI_REASSIGN_ALL_BUS);
198 198
199 list_splice_init(&pci_res, &bridge->windows); 199 list_splice_init(&pci_res, &bridge->windows);
200 bridge->dev.parent = dev; 200 bridge->dev.parent = dev;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 2fc106d52aff..0a0d7ee6d3c9 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -658,7 +658,6 @@ static int xgene_pcie_probe(struct platform_device *pdev)
658 658
659 bus = bridge->bus; 659 bus = bridge->bus;
660 660
661 pci_scan_child_bus(bus);
662 pci_assign_unassigned_bus_resources(bus); 661 pci_assign_unassigned_bus_resources(bus);
663 list_for_each_entry(child, &bus->children, node) 662 list_for_each_entry(child, &bus->children, node)
664 pcie_bus_configure_settings(child); 663 pcie_bus_configure_settings(child);
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 3e6927c318f2..e764a2a2693c 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -84,6 +84,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
84 pcie->need_ob_cfg = true; 84 pcie->need_ob_cfg = true;
85 } 85 }
86 86
87 /*
88 * DT nodes are not used by all platforms that use the iProc PCIe
89 * core driver. For platforms that require explict inbound mapping
90 * configuration, "dma-ranges" would have been present in DT
91 */
92 pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
93
87 /* PHY use is optional */ 94 /* PHY use is optional */
88 pcie->phy = devm_phy_get(dev, "pcie-phy"); 95 pcie->phy = devm_phy_get(dev, "pcie-phy");
89 if (IS_ERR(pcie->phy)) { 96 if (IS_ERR(pcie->phy)) {
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 331dab12daee..cbb095481cdc 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -1370,9 +1370,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
1370 } 1370 }
1371 } 1371 }
1372 1372
1373 ret = iproc_pcie_map_dma_ranges(pcie); 1373 if (pcie->need_ib_cfg) {
1374 if (ret && ret != -ENOENT) 1374 ret = iproc_pcie_map_dma_ranges(pcie);
1375 goto err_power_off_phy; 1375 if (ret && ret != -ENOENT)
1376 goto err_power_off_phy;
1377 }
1376 1378
1377#ifdef CONFIG_ARM 1379#ifdef CONFIG_ARM
1378 pcie->sysdata.private_data = pcie; 1380 pcie->sysdata.private_data = pcie;
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 944d546fba2a..d55f56a186cd 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -66,6 +66,7 @@ struct iproc_msi;
66 * @ob: outbound mapping related parameters 66 * @ob: outbound mapping related parameters
67 * @ob_map: outbound mapping related parameters specific to the controller 67 * @ob_map: outbound mapping related parameters specific to the controller
68 * 68 *
69 * @need_ib_cfg: indicates SW needs to configure the inbound mapping window
69 * @ib: inbound mapping related parameters 70 * @ib: inbound mapping related parameters
70 * @ib_map: outbound mapping region related parameters 71 * @ib_map: outbound mapping region related parameters
71 * 72 *
@@ -93,6 +94,7 @@ struct iproc_pcie {
93 struct iproc_pcie_ob ob; 94 struct iproc_pcie_ob ob;
94 const struct iproc_pcie_ob_map *ob_map; 95 const struct iproc_pcie_ob_map *ob_map;
95 96
97 bool need_ib_cfg;
96 struct iproc_pcie_ib ib; 98 struct iproc_pcie_ib ib;
97 const struct iproc_pcie_ib_map *ib_map; 99 const struct iproc_pcie_ib_map *ib_map;
98 100
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 5efce1f237c5..b4c4aad2cf66 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -456,7 +456,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
456 456
457 rcar_pcie_setup(&bridge->windows, pcie); 457 rcar_pcie_setup(&bridge->windows, pcie);
458 458
459 pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); 459 pci_add_flags(PCI_REASSIGN_ALL_BUS);
460 460
461 bridge->dev.parent = dev; 461 bridge->dev.parent = dev;
462 bridge->sysdata = pcie; 462 bridge->sysdata = pcie;
@@ -1120,7 +1120,9 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1120 1120
1121 INIT_LIST_HEAD(&pcie->resources); 1121 INIT_LIST_HEAD(&pcie->resources);
1122 1122
1123 rcar_pcie_parse_request_of_pci_ranges(pcie); 1123 err = rcar_pcie_parse_request_of_pci_ranges(pcie);
1124 if (err)
1125 goto err_free_bridge;
1124 1126
1125 err = rcar_pcie_get_resources(pcie); 1127 err = rcar_pcie_get_resources(pcie);
1126 if (err < 0) { 1128 if (err < 0) {
@@ -1175,6 +1177,7 @@ err_pm_disable:
1175 1177
1176err_free_resource_list: 1178err_free_resource_list:
1177 pci_free_resource_list(&pcie->resources); 1179 pci_free_resource_list(&pcie->resources);
1180err_free_bridge:
1178 pci_free_host_bridge(bridge); 1181 pci_free_host_bridge(bridge);
1179 1182
1180 return err; 1183 return err;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 47498fb2a812..e2198a2feeca 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -797,10 +797,8 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
797 797
798 handle = adev->handle; 798 handle = adev->handle;
799 bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); 799 bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
800 if (!bridge) { 800 if (!bridge)
801 acpi_handle_err(handle, "No memory for bridge object\n");
802 return; 801 return;
803 }
804 802
805 INIT_LIST_HEAD(&bridge->slots); 803 INIT_LIST_HEAD(&bridge->slots);
806 kref_init(&bridge->ref); 804 kref_init(&bridge->ref);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 91669ab68ffb..1797e36ec586 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -821,7 +821,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
821 821
822 bus = pdev->subordinate; 822 bus = pdev->subordinate;
823 if (!bus) { 823 if (!bus) {
824 dev_notice(&pdev->dev, "the device is not a bridge, skipping\n"); 824 pci_notice(pdev, "the device is not a bridge, skipping\n");
825 rc = -ENODEV; 825 rc = -ENODEV;
826 goto err_disable_device; 826 goto err_disable_device;
827 } 827 }
@@ -869,7 +869,6 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
869 869
870 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL); 870 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
871 if (!ctrl) { 871 if (!ctrl) {
872 err("%s : out of memory\n", __func__);
873 rc = -ENOMEM; 872 rc = -ENOMEM;
874 goto err_disable_device; 873 goto err_disable_device;
875 } 874 }
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 21c9bf20e542..1b2b3f3b648b 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -75,7 +75,9 @@ int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func)
75 pci_lock_rescan_remove(); 75 pci_lock_rescan_remove();
76 76
77 if (func->pci_dev == NULL) 77 if (func->pci_dev == NULL)
78 func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function)); 78 func->pci_dev = pci_get_domain_bus_and_slot(0, func->bus,
79 PCI_DEVFN(func->device,
80 func->function));
79 81
80 /* No pci device, we need to create it then */ 82 /* No pci device, we need to create it then */
81 if (func->pci_dev == NULL) { 83 if (func->pci_dev == NULL) {
@@ -85,7 +87,9 @@ int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func)
85 if (num) 87 if (num)
86 pci_bus_add_devices(ctrl->pci_dev->bus); 88 pci_bus_add_devices(ctrl->pci_dev->bus);
87 89
88 func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function)); 90 func->pci_dev = pci_get_domain_bus_and_slot(0, func->bus,
91 PCI_DEVFN(func->device,
92 func->function));
89 if (func->pci_dev == NULL) { 93 if (func->pci_dev == NULL) {
90 dbg("ERROR: pci_dev still null\n"); 94 dbg("ERROR: pci_dev still null\n");
91 goto out; 95 goto out;
@@ -115,7 +119,10 @@ int cpqhp_unconfigure_device(struct pci_func *func)
115 119
116 pci_lock_rescan_remove(); 120 pci_lock_rescan_remove();
117 for (j = 0; j < 8 ; j++) { 121 for (j = 0; j < 8 ; j++) {
118 struct pci_dev *temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j)); 122 struct pci_dev *temp = pci_get_domain_bus_and_slot(0,
123 func->bus,
124 PCI_DEVFN(func->device,
125 j));
119 if (temp) { 126 if (temp) {
120 pci_dev_put(temp); 127 pci_dev_put(temp);
121 pci_stop_and_remove_bus_device(temp); 128 pci_stop_and_remove_bus_device(temp);
@@ -305,6 +312,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
305 int cloop = 0; 312 int cloop = 0;
306 int stop_it; 313 int stop_it;
307 int index; 314 int index;
315 u16 devfn;
308 316
309 /* Decide which slots are supported */ 317 /* Decide which slots are supported */
310 318
@@ -402,7 +410,9 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
402 new_slot->switch_save = 0x10; 410 new_slot->switch_save = 0x10;
403 /* In case of unsupported board */ 411 /* In case of unsupported board */
404 new_slot->status = DevError; 412 new_slot->status = DevError;
405 new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function); 413 devfn = (new_slot->device << 3) | new_slot->function;
414 new_slot->pci_dev = pci_get_domain_bus_and_slot(0,
415 new_slot->bus, devfn);
406 416
407 for (cloop = 0; cloop < 0x20; cloop++) { 417 for (cloop = 0; cloop < 0x20; cloop++) {
408 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop])); 418 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop]));
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index e699220a6f37..b81ca3fa0e84 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -589,10 +589,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
589 u8 mode; 589 u8 mode;
590 590
591 info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); 591 info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
592 if (!info) { 592 if (!info)
593 err("out of system memory\n");
594 return -ENOMEM; 593 return -ENOMEM;
595 }
596 594
597 info->power_status = SLOT_PWRGD(slot_cur->status); 595 info->power_status = SLOT_PWRGD(slot_cur->status);
598 info->attention_status = SLOT_ATTN(slot_cur->status, 596 info->attention_status = SLOT_ATTN(slot_cur->status,
@@ -693,7 +691,8 @@ static void ibm_unconfigure_device(struct pci_func *func)
693 pci_lock_rescan_remove(); 691 pci_lock_rescan_remove();
694 692
695 for (j = 0; j < 0x08; j++) { 693 for (j = 0; j < 0x08; j++) {
696 temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j); 694 temp = pci_get_domain_bus_and_slot(0, func->busno,
695 (func->device << 3) | j);
697 if (temp) { 696 if (temp) {
698 pci_stop_and_remove_bus_device(temp); 697 pci_stop_and_remove_bus_device(temp);
699 pci_dev_put(temp); 698 pci_dev_put(temp);
@@ -720,14 +719,12 @@ static u8 bus_structure_fixup(u8 busno)
720 return 1; 719 return 1;
721 720
722 bus = kmalloc(sizeof(*bus), GFP_KERNEL); 721 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
723 if (!bus) { 722 if (!bus)
724 err("%s - out of memory\n", __func__);
725 return 1; 723 return 1;
726 } 724
727 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 725 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
728 if (!dev) { 726 if (!dev) {
729 kfree(bus); 727 kfree(bus);
730 err("%s - out of memory\n", __func__);
731 return 1; 728 return 1;
732 } 729 }
733 730
@@ -766,7 +763,7 @@ static int ibm_configure_device(struct pci_func *func)
766 if (!(bus_structure_fixup(func->busno))) 763 if (!(bus_structure_fixup(func->busno)))
767 flag = 1; 764 flag = 1;
768 if (func->dev == NULL) 765 if (func->dev == NULL)
769 func->dev = pci_get_bus_and_slot(func->busno, 766 func->dev = pci_get_domain_bus_and_slot(0, func->busno,
770 PCI_DEVFN(func->device, func->function)); 767 PCI_DEVFN(func->device, func->function));
771 768
772 if (func->dev == NULL) { 769 if (func->dev == NULL) {
@@ -779,7 +776,7 @@ static int ibm_configure_device(struct pci_func *func)
779 if (num) 776 if (num)
780 pci_bus_add_devices(bus); 777 pci_bus_add_devices(bus);
781 778
782 func->dev = pci_get_bus_and_slot(func->busno, 779 func->dev = pci_get_domain_bus_and_slot(0, func->busno,
783 PCI_DEVFN(func->device, func->function)); 780 PCI_DEVFN(func->device, func->function));
784 if (func->dev == NULL) { 781 if (func->dev == NULL) {
785 err("ERROR... : pci_dev still NULL\n"); 782 err("ERROR... : pci_dev still NULL\n");
@@ -1087,7 +1084,6 @@ static int enable_slot(struct hotplug_slot *hs)
1087 if (!slot_cur->func) { 1084 if (!slot_cur->func) {
1088 /* We cannot do update_slot_info here, since no memory for 1085 /* We cannot do update_slot_info here, since no memory for
1089 * kmalloc n.e.ways, and update_slot_info allocates some */ 1086 * kmalloc n.e.ways, and update_slot_info allocates some */
1090 err("out of system memory\n");
1091 rc = -ENOMEM; 1087 rc = -ENOMEM;
1092 goto error_power; 1088 goto error_power;
1093 } 1089 }
@@ -1194,7 +1190,6 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
1194 /* We need this for functions that were there on bootup */ 1190 /* We need this for functions that were there on bootup */
1195 slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); 1191 slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
1196 if (!slot_cur->func) { 1192 if (!slot_cur->func) {
1197 err("out of system memory\n");
1198 rc = -ENOMEM; 1193 rc = -ENOMEM;
1199 goto error; 1194 goto error;
1200 } 1195 }
@@ -1292,7 +1287,6 @@ static int __init ibmphp_init(void)
1292 1287
1293 ibmphp_pci_bus = kmalloc(sizeof(*ibmphp_pci_bus), GFP_KERNEL); 1288 ibmphp_pci_bus = kmalloc(sizeof(*ibmphp_pci_bus), GFP_KERNEL);
1294 if (!ibmphp_pci_bus) { 1289 if (!ibmphp_pci_bus) {
1295 err("out of memory\n");
1296 rc = -ENOMEM; 1290 rc = -ENOMEM;
1297 goto exit; 1291 goto exit;
1298 } 1292 }
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index b95d60fa11e9..e22d023f91d1 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -153,10 +153,9 @@ int ibmphp_configure_card(struct pci_func *func, u8 slotno)
153 goto error; 153 goto error;
154 } 154 }
155 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); 155 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
156 if (!newfunc) { 156 if (!newfunc)
157 err("out of system memory\n");
158 return -ENOMEM; 157 return -ENOMEM;
159 } 158
160 newfunc->busno = cur_func->busno; 159 newfunc->busno = cur_func->busno;
161 newfunc->device = device; 160 newfunc->device = device;
162 cur_func->next = newfunc; 161 cur_func->next = newfunc;
@@ -191,10 +190,9 @@ int ibmphp_configure_card(struct pci_func *func, u8 slotno)
191 for (i = 0; i < 32; i++) { 190 for (i = 0; i < 32; i++) {
192 if (func->devices[i]) { 191 if (func->devices[i]) {
193 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); 192 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
194 if (!newfunc) { 193 if (!newfunc)
195 err("out of system memory\n");
196 return -ENOMEM; 194 return -ENOMEM;
197 } 195
198 newfunc->busno = sec_number; 196 newfunc->busno = sec_number;
199 newfunc->device = (u8) i; 197 newfunc->device = (u8) i;
200 for (j = 0; j < 4; j++) 198 for (j = 0; j < 4; j++)
@@ -219,10 +217,9 @@ int ibmphp_configure_card(struct pci_func *func, u8 slotno)
219 } 217 }
220 218
221 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); 219 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
222 if (!newfunc) { 220 if (!newfunc)
223 err("out of system memory\n");
224 return -ENOMEM; 221 return -ENOMEM;
225 } 222
226 newfunc->busno = cur_func->busno; 223 newfunc->busno = cur_func->busno;
227 newfunc->device = device; 224 newfunc->device = device;
228 for (j = 0; j < 4; j++) 225 for (j = 0; j < 4; j++)
@@ -265,10 +262,9 @@ int ibmphp_configure_card(struct pci_func *func, u8 slotno)
265 if (func->devices[i]) { 262 if (func->devices[i]) {
266 debug("inside for loop, device is %x\n", i); 263 debug("inside for loop, device is %x\n", i);
267 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); 264 newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
268 if (!newfunc) { 265 if (!newfunc)
269 err(" out of system memory\n");
270 return -ENOMEM; 266 return -ENOMEM;
271 } 267
272 newfunc->busno = sec_number; 268 newfunc->busno = sec_number;
273 newfunc->device = (u8) i; 269 newfunc->device = (u8) i;
274 for (j = 0; j < 4; j++) 270 for (j = 0; j < 4; j++)
@@ -391,10 +387,9 @@ static int configure_device(struct pci_func *func)
391 387
392 io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 388 io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
393 389
394 if (!io[count]) { 390 if (!io[count])
395 err("out of system memory\n");
396 return -ENOMEM; 391 return -ENOMEM;
397 } 392
398 io[count]->type = IO; 393 io[count]->type = IO;
399 io[count]->busno = func->busno; 394 io[count]->busno = func->busno;
400 io[count]->devfunc = PCI_DEVFN(func->device, func->function); 395 io[count]->devfunc = PCI_DEVFN(func->device, func->function);
@@ -428,10 +423,9 @@ static int configure_device(struct pci_func *func)
428 debug("len[count] in PFMEM %x, count %d\n", len[count], count); 423 debug("len[count] in PFMEM %x, count %d\n", len[count], count);
429 424
430 pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 425 pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
431 if (!pfmem[count]) { 426 if (!pfmem[count])
432 err("out of system memory\n");
433 return -ENOMEM; 427 return -ENOMEM;
434 } 428
435 pfmem[count]->type = PFMEM; 429 pfmem[count]->type = PFMEM;
436 pfmem[count]->busno = func->busno; 430 pfmem[count]->busno = func->busno;
437 pfmem[count]->devfunc = PCI_DEVFN(func->device, 431 pfmem[count]->devfunc = PCI_DEVFN(func->device,
@@ -444,7 +438,6 @@ static int configure_device(struct pci_func *func)
444 } else { 438 } else {
445 mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); 439 mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL);
446 if (!mem_tmp) { 440 if (!mem_tmp) {
447 err("out of system memory\n");
448 kfree(pfmem[count]); 441 kfree(pfmem[count]);
449 return -ENOMEM; 442 return -ENOMEM;
450 } 443 }
@@ -494,10 +487,9 @@ static int configure_device(struct pci_func *func)
494 debug("len[count] in Mem %x, count %d\n", len[count], count); 487 debug("len[count] in Mem %x, count %d\n", len[count], count);
495 488
496 mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 489 mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
497 if (!mem[count]) { 490 if (!mem[count])
498 err("out of system memory\n");
499 return -ENOMEM; 491 return -ENOMEM;
500 } 492
501 mem[count]->type = MEM; 493 mem[count]->type = MEM;
502 mem[count]->busno = func->busno; 494 mem[count]->busno = func->busno;
503 mem[count]->devfunc = PCI_DEVFN(func->device, 495 mem[count]->devfunc = PCI_DEVFN(func->device,
@@ -660,7 +652,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
660 bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 652 bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
661 653
662 if (!bus_io[count]) { 654 if (!bus_io[count]) {
663 err("out of system memory\n");
664 retval = -ENOMEM; 655 retval = -ENOMEM;
665 goto error; 656 goto error;
666 } 657 }
@@ -692,7 +683,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
692 683
693 bus_pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 684 bus_pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
694 if (!bus_pfmem[count]) { 685 if (!bus_pfmem[count]) {
695 err("out of system memory\n");
696 retval = -ENOMEM; 686 retval = -ENOMEM;
697 goto error; 687 goto error;
698 } 688 }
@@ -708,7 +698,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
708 } else { 698 } else {
709 mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); 699 mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL);
710 if (!mem_tmp) { 700 if (!mem_tmp) {
711 err("out of system memory\n");
712 retval = -ENOMEM; 701 retval = -ENOMEM;
713 goto error; 702 goto error;
714 } 703 }
@@ -749,7 +738,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
749 738
750 bus_mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 739 bus_mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
751 if (!bus_mem[count]) { 740 if (!bus_mem[count]) {
752 err("out of system memory\n");
753 retval = -ENOMEM; 741 retval = -ENOMEM;
754 goto error; 742 goto error;
755 } 743 }
@@ -820,7 +808,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
820 io = kzalloc(sizeof(*io), GFP_KERNEL); 808 io = kzalloc(sizeof(*io), GFP_KERNEL);
821 809
822 if (!io) { 810 if (!io) {
823 err("out of system memory\n");
824 retval = -ENOMEM; 811 retval = -ENOMEM;
825 goto error; 812 goto error;
826 } 813 }
@@ -842,7 +829,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
842 debug("it wants %x memory behind the bridge\n", amount_needed->mem); 829 debug("it wants %x memory behind the bridge\n", amount_needed->mem);
843 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 830 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
844 if (!mem) { 831 if (!mem) {
845 err("out of system memory\n");
846 retval = -ENOMEM; 832 retval = -ENOMEM;
847 goto error; 833 goto error;
848 } 834 }
@@ -864,7 +850,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
864 debug("it wants %x pfmemory behind the bridge\n", amount_needed->pfmem); 850 debug("it wants %x pfmemory behind the bridge\n", amount_needed->pfmem);
865 pfmem = kzalloc(sizeof(*pfmem), GFP_KERNEL); 851 pfmem = kzalloc(sizeof(*pfmem), GFP_KERNEL);
866 if (!pfmem) { 852 if (!pfmem) {
867 err("out of system memory\n");
868 retval = -ENOMEM; 853 retval = -ENOMEM;
869 goto error; 854 goto error;
870 } 855 }
@@ -879,7 +864,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
879 } else { 864 } else {
880 mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); 865 mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL);
881 if (!mem_tmp) { 866 if (!mem_tmp) {
882 err("out of system memory\n");
883 retval = -ENOMEM; 867 retval = -ENOMEM;
884 goto error; 868 goto error;
885 } 869 }
@@ -910,7 +894,6 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
910 if (!bus) { 894 if (!bus) {
911 bus = kzalloc(sizeof(*bus), GFP_KERNEL); 895 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
912 if (!bus) { 896 if (!bus) {
913 err("out of system memory\n");
914 retval = -ENOMEM; 897 retval = -ENOMEM;
915 goto error; 898 goto error;
916 } 899 }
@@ -1638,10 +1621,9 @@ static int add_new_bus(struct bus_node *bus, struct resource_node *io, struct re
1638 } 1621 }
1639 if (io) { 1622 if (io) {
1640 io_range = kzalloc(sizeof(*io_range), GFP_KERNEL); 1623 io_range = kzalloc(sizeof(*io_range), GFP_KERNEL);
1641 if (!io_range) { 1624 if (!io_range)
1642 err("out of system memory\n");
1643 return -ENOMEM; 1625 return -ENOMEM;
1644 } 1626
1645 io_range->start = io->start; 1627 io_range->start = io->start;
1646 io_range->end = io->end; 1628 io_range->end = io->end;
1647 io_range->rangeno = 1; 1629 io_range->rangeno = 1;
@@ -1650,10 +1632,9 @@ static int add_new_bus(struct bus_node *bus, struct resource_node *io, struct re
1650 } 1632 }
1651 if (mem) { 1633 if (mem) {
1652 mem_range = kzalloc(sizeof(*mem_range), GFP_KERNEL); 1634 mem_range = kzalloc(sizeof(*mem_range), GFP_KERNEL);
1653 if (!mem_range) { 1635 if (!mem_range)
1654 err("out of system memory\n");
1655 return -ENOMEM; 1636 return -ENOMEM;
1656 } 1637
1657 mem_range->start = mem->start; 1638 mem_range->start = mem->start;
1658 mem_range->end = mem->end; 1639 mem_range->end = mem->end;
1659 mem_range->rangeno = 1; 1640 mem_range->rangeno = 1;
@@ -1662,10 +1643,9 @@ static int add_new_bus(struct bus_node *bus, struct resource_node *io, struct re
1662 } 1643 }
1663 if (pfmem) { 1644 if (pfmem) {
1664 pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL); 1645 pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL);
1665 if (!pfmem_range) { 1646 if (!pfmem_range)
1666 err("out of system memory\n");
1667 return -ENOMEM; 1647 return -ENOMEM;
1668 } 1648
1669 pfmem_range->start = pfmem->start; 1649 pfmem_range->start = pfmem->start;
1670 pfmem_range->end = pfmem->end; 1650 pfmem_range->end = pfmem->end;
1671 pfmem_range->rangeno = 1; 1651 pfmem_range->rangeno = 1;
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index 41851f58b2fd..5e8caf7a4452 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -42,10 +42,8 @@ static struct bus_node * __init alloc_error_bus(struct ebda_pci_rsrc *curr, u8 b
42 } 42 }
43 43
44 newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); 44 newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL);
45 if (!newbus) { 45 if (!newbus)
46 err("out of system memory\n");
47 return NULL; 46 return NULL;
48 }
49 47
50 if (flag) 48 if (flag)
51 newbus->busno = busno; 49 newbus->busno = busno;
@@ -65,10 +63,9 @@ static struct resource_node * __init alloc_resources(struct ebda_pci_rsrc *curr)
65 } 63 }
66 64
67 rs = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 65 rs = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
68 if (!rs) { 66 if (!rs)
69 err("out of system memory\n");
70 return NULL; 67 return NULL;
71 } 68
72 rs->busno = curr->bus_num; 69 rs->busno = curr->bus_num;
73 rs->devfunc = curr->dev_fun; 70 rs->devfunc = curr->dev_fun;
74 rs->start = curr->start_addr; 71 rs->start = curr->start_addr;
@@ -85,10 +82,9 @@ static int __init alloc_bus_range(struct bus_node **new_bus, struct range_node *
85 82
86 if (first_bus) { 83 if (first_bus) {
87 newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); 84 newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL);
88 if (!newbus) { 85 if (!newbus)
89 err("out of system memory.\n");
90 return -ENOMEM; 86 return -ENOMEM;
91 } 87
92 newbus->busno = curr->bus_num; 88 newbus->busno = curr->bus_num;
93 } else { 89 } else {
94 newbus = *new_bus; 90 newbus = *new_bus;
@@ -109,7 +105,6 @@ static int __init alloc_bus_range(struct bus_node **new_bus, struct range_node *
109 if (!newrange) { 105 if (!newrange) {
110 if (first_bus) 106 if (first_bus)
111 kfree(newbus); 107 kfree(newbus);
112 err("out of system memory\n");
113 return -ENOMEM; 108 return -ENOMEM;
114 } 109 }
115 newrange->start = curr->start_addr; 110 newrange->start = curr->start_addr;
@@ -1693,10 +1688,9 @@ static int __init once_over(void)
1693 bus_cur->firstPFMemFromMem = pfmem_cur; 1688 bus_cur->firstPFMemFromMem = pfmem_cur;
1694 1689
1695 mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 1690 mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
1696 if (!mem) { 1691 if (!mem)
1697 err("out of system memory\n");
1698 return -ENOMEM; 1692 return -ENOMEM;
1699 } 1693
1700 mem->type = MEM; 1694 mem->type = MEM;
1701 mem->busno = pfmem_cur->busno; 1695 mem->busno = pfmem_cur->busno;
1702 mem->devfunc = pfmem_cur->devfunc; 1696 mem->devfunc = pfmem_cur->devfunc;
@@ -1975,10 +1969,9 @@ static int __init update_bridge_ranges(struct bus_node **bus)
1975 1969
1976 if ((start_address) && (start_address <= end_address)) { 1970 if ((start_address) && (start_address <= end_address)) {
1977 range = kzalloc(sizeof(struct range_node), GFP_KERNEL); 1971 range = kzalloc(sizeof(struct range_node), GFP_KERNEL);
1978 if (!range) { 1972 if (!range)
1979 err("out of system memory\n");
1980 return -ENOMEM; 1973 return -ENOMEM;
1981 } 1974
1982 range->start = start_address; 1975 range->start = start_address;
1983 range->end = end_address + 0xfff; 1976 range->end = end_address + 0xfff;
1984 1977
@@ -2002,7 +1995,6 @@ static int __init update_bridge_ranges(struct bus_node **bus)
2002 io = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 1995 io = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
2003 if (!io) { 1996 if (!io) {
2004 kfree(range); 1997 kfree(range);
2005 err("out of system memory\n");
2006 return -ENOMEM; 1998 return -ENOMEM;
2007 } 1999 }
2008 io->type = IO; 2000 io->type = IO;
@@ -2024,10 +2016,9 @@ static int __init update_bridge_ranges(struct bus_node **bus)
2024 if ((start_address) && (start_address <= end_address)) { 2016 if ((start_address) && (start_address <= end_address)) {
2025 2017
2026 range = kzalloc(sizeof(struct range_node), GFP_KERNEL); 2018 range = kzalloc(sizeof(struct range_node), GFP_KERNEL);
2027 if (!range) { 2019 if (!range)
2028 err("out of system memory\n");
2029 return -ENOMEM; 2020 return -ENOMEM;
2030 } 2021
2031 range->start = start_address; 2022 range->start = start_address;
2032 range->end = end_address + 0xfffff; 2023 range->end = end_address + 0xfffff;
2033 2024
@@ -2052,7 +2043,6 @@ static int __init update_bridge_ranges(struct bus_node **bus)
2052 mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 2043 mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
2053 if (!mem) { 2044 if (!mem) {
2054 kfree(range); 2045 kfree(range);
2055 err("out of system memory\n");
2056 return -ENOMEM; 2046 return -ENOMEM;
2057 } 2047 }
2058 mem->type = MEM; 2048 mem->type = MEM;
@@ -2078,10 +2068,9 @@ static int __init update_bridge_ranges(struct bus_node **bus)
2078 if ((start_address) && (start_address <= end_address)) { 2068 if ((start_address) && (start_address <= end_address)) {
2079 2069
2080 range = kzalloc(sizeof(struct range_node), GFP_KERNEL); 2070 range = kzalloc(sizeof(struct range_node), GFP_KERNEL);
2081 if (!range) { 2071 if (!range)
2082 err("out of system memory\n");
2083 return -ENOMEM; 2072 return -ENOMEM;
2084 } 2073
2085 range->start = start_address; 2074 range->start = start_address;
2086 range->end = end_address + 0xfffff; 2075 range->end = end_address + 0xfffff;
2087 2076
@@ -2105,7 +2094,6 @@ static int __init update_bridge_ranges(struct bus_node **bus)
2105 pfmem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); 2094 pfmem = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
2106 if (!pfmem) { 2095 if (!pfmem) {
2107 kfree(range); 2096 kfree(range);
2108 err("out of system memory\n");
2109 return -ENOMEM; 2097 return -ENOMEM;
2110 } 2098 }
2111 pfmem->type = PFMEM; 2099 pfmem->type = PFMEM;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index c414d59372e0..18a42f8f5dc5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -824,16 +824,22 @@ struct controller *pcie_init(struct pcie_device *dev)
824 struct pci_dev *pdev = dev->port; 824 struct pci_dev *pdev = dev->port;
825 825
826 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 826 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
827 if (!ctrl) { 827 if (!ctrl)
828 dev_err(&dev->device, "%s: Out of memory\n", __func__);
829 goto abort; 828 goto abort;
830 } 829
831 ctrl->pcie = dev; 830 ctrl->pcie = dev;
832 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); 831 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
833 832
834 if (pdev->hotplug_user_indicators) 833 if (pdev->hotplug_user_indicators)
835 slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP); 834 slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
836 835
836 /*
837 * We assume no Thunderbolt controllers support Command Complete events,
838 * but some controllers falsely claim they do.
839 */
840 if (pdev->is_thunderbolt)
841 slot_cap |= PCI_EXP_SLTCAP_NCCS;
842
837 ctrl->slot_cap = slot_cap; 843 ctrl->slot_cap = slot_cap;
838 mutex_init(&ctrl->ctrl_lock); 844 mutex_init(&ctrl->ctrl_lock);
839 init_waitqueue_head(&ctrl->queue); 845 init_waitqueue_head(&ctrl->queue);
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index fd673a5ab925..3f518dea856d 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -65,7 +65,6 @@ int pciehp_configure_device(struct slot *p_slot)
65int pciehp_unconfigure_device(struct slot *p_slot) 65int pciehp_unconfigure_device(struct slot *p_slot)
66{ 66{
67 int rc = 0; 67 int rc = 0;
68 u8 bctl = 0;
69 u8 presence = 0; 68 u8 presence = 0;
70 struct pci_dev *dev, *temp; 69 struct pci_dev *dev, *temp;
71 struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; 70 struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
@@ -87,17 +86,6 @@ int pciehp_unconfigure_device(struct slot *p_slot)
87 list_for_each_entry_safe_reverse(dev, temp, &parent->devices, 86 list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
88 bus_list) { 87 bus_list) {
89 pci_dev_get(dev); 88 pci_dev_get(dev);
90 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
91 pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
92 if (bctl & PCI_BRIDGE_CTL_VGA) {
93 ctrl_err(ctrl,
94 "Cannot remove display device %s\n",
95 pci_name(dev));
96 pci_dev_put(dev);
97 rc = -EINVAL;
98 break;
99 }
100 }
101 if (!presence) { 89 if (!presence) {
102 pci_dev_set_disconnected(dev, NULL); 90 pci_dev_set_disconnected(dev, NULL);
103 if (pci_has_subordinate(dev)) 91 if (pci_has_subordinate(dev))
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 36ef485630da..23da3046f160 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -262,22 +262,18 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
262 fdt1 = kzalloc(0x10000, GFP_KERNEL); 262 fdt1 = kzalloc(0x10000, GFP_KERNEL);
263 if (!fdt1) { 263 if (!fdt1) {
264 ret = -ENOMEM; 264 ret = -ENOMEM;
265 dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n");
266 goto out; 265 goto out;
267 } 266 }
268 267
269 ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000); 268 ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
270 if (ret) { 269 if (ret) {
271 dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n", 270 pci_warn(php_slot->pdev, "Error %d getting FDT blob\n", ret);
272 ret);
273 goto free_fdt1; 271 goto free_fdt1;
274 } 272 }
275 273
276 fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL); 274 fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
277 if (!fdt) { 275 if (!fdt) {
278 ret = -ENOMEM; 276 ret = -ENOMEM;
279 dev_warn(&php_slot->pdev->dev, "Cannot %d bytes memory\n",
280 fdt_totalsize(fdt1));
281 goto free_fdt1; 277 goto free_fdt1;
282 } 278 }
283 279
@@ -286,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
286 dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL); 282 dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
287 if (!dt) { 283 if (!dt) {
288 ret = -EINVAL; 284 ret = -EINVAL;
289 dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n"); 285 pci_warn(php_slot->pdev, "Cannot unflatten FDT\n");
290 goto free_fdt; 286 goto free_fdt;
291 } 287 }
292 288
@@ -296,7 +292,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
296 ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn); 292 ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
297 if (ret) { 293 if (ret) {
298 pnv_php_reverse_nodes(php_slot->dn); 294 pnv_php_reverse_nodes(php_slot->dn);
299 dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n", 295 pci_warn(php_slot->pdev, "Error %d populating changeset\n",
300 ret); 296 ret);
301 goto free_dt; 297 goto free_dt;
302 } 298 }
@@ -304,8 +300,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
304 php_slot->dn->child = NULL; 300 php_slot->dn->child = NULL;
305 ret = of_changeset_apply(&php_slot->ocs); 301 ret = of_changeset_apply(&php_slot->ocs);
306 if (ret) { 302 if (ret) {
307 dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n", 303 pci_warn(php_slot->pdev, "Error %d applying changeset\n", ret);
308 ret);
309 goto destroy_changeset; 304 goto destroy_changeset;
310 } 305 }
311 306
@@ -341,14 +336,14 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
341 if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle || 336 if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
342 be64_to_cpu(msg.params[2]) != state || 337 be64_to_cpu(msg.params[2]) != state ||
343 be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) { 338 be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
344 dev_warn(&php_slot->pdev->dev, "Wrong msg (%lld, %lld, %lld)\n", 339 pci_warn(php_slot->pdev, "Wrong msg (%lld, %lld, %lld)\n",
345 be64_to_cpu(msg.params[1]), 340 be64_to_cpu(msg.params[1]),
346 be64_to_cpu(msg.params[2]), 341 be64_to_cpu(msg.params[2]),
347 be64_to_cpu(msg.params[3])); 342 be64_to_cpu(msg.params[3]));
348 return -ENOMSG; 343 return -ENOMSG;
349 } 344 }
350 } else if (ret < 0) { 345 } else if (ret < 0) {
351 dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n", 346 pci_warn(php_slot->pdev, "Error %d powering %s\n",
352 ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off"); 347 ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
353 return ret; 348 return ret;
354 } 349 }
@@ -375,7 +370,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
375 */ 370 */
376 ret = pnv_pci_get_power_state(php_slot->id, &power_state); 371 ret = pnv_pci_get_power_state(php_slot->id, &power_state);
377 if (ret) { 372 if (ret) {
378 dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n", 373 pci_warn(php_slot->pdev, "Error %d getting power status\n",
379 ret); 374 ret);
380 } else { 375 } else {
381 *state = power_state; 376 *state = power_state;
@@ -401,8 +396,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
401 slot->info->adapter_status = presence; 396 slot->info->adapter_status = presence;
402 ret = 0; 397 ret = 0;
403 } else { 398 } else {
404 dev_warn(&php_slot->pdev->dev, "Error %d getting presence\n", 399 pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
405 ret);
406 } 400 }
407 401
408 return ret; 402 return ret;
@@ -625,8 +619,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
625 ret = pci_hp_register(&php_slot->slot, php_slot->bus, 619 ret = pci_hp_register(&php_slot->slot, php_slot->bus,
626 php_slot->slot_no, php_slot->name); 620 php_slot->slot_no, php_slot->name);
627 if (ret) { 621 if (ret) {
628 dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n", 622 pci_warn(php_slot->pdev, "Error %d registering slot\n", ret);
629 ret);
630 return ret; 623 return ret;
631 } 624 }
632 625
@@ -679,7 +672,7 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
679 /* Enable MSIx */ 672 /* Enable MSIx */
680 ret = pci_enable_msix_exact(pdev, &entry, 1); 673 ret = pci_enable_msix_exact(pdev, &entry, 1);
681 if (ret) { 674 if (ret) {
682 dev_warn(&pdev->dev, "Error %d enabling MSIx\n", ret); 675 pci_warn(pdev, "Error %d enabling MSIx\n", ret);
683 return ret; 676 return ret;
684 } 677 }
685 678
@@ -723,7 +716,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
723 (sts & PCI_EXP_SLTSTA_PDC)) { 716 (sts & PCI_EXP_SLTSTA_PDC)) {
724 ret = pnv_pci_get_presence_state(php_slot->id, &presence); 717 ret = pnv_pci_get_presence_state(php_slot->id, &presence);
725 if (ret) { 718 if (ret) {
726 dev_warn(&pdev->dev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n", 719 pci_warn(pdev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
727 php_slot->name, ret, sts); 720 php_slot->name, ret, sts);
728 return IRQ_HANDLED; 721 return IRQ_HANDLED;
729 } 722 }
@@ -753,12 +746,12 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
753 */ 746 */
754 event = kzalloc(sizeof(*event), GFP_ATOMIC); 747 event = kzalloc(sizeof(*event), GFP_ATOMIC);
755 if (!event) { 748 if (!event) {
756 dev_warn(&pdev->dev, "PCI slot [%s] missed hotplug event 0x%04x\n", 749 pci_warn(pdev, "PCI slot [%s] missed hotplug event 0x%04x\n",
757 php_slot->name, sts); 750 php_slot->name, sts);
758 return IRQ_HANDLED; 751 return IRQ_HANDLED;
759 } 752 }
760 753
761 dev_info(&pdev->dev, "PCI slot [%s] %s (IRQ: %d)\n", 754 pci_info(pdev, "PCI slot [%s] %s (IRQ: %d)\n",
762 php_slot->name, added ? "added" : "removed", irq); 755 php_slot->name, added ? "added" : "removed", irq);
763 INIT_WORK(&event->work, pnv_php_event_handler); 756 INIT_WORK(&event->work, pnv_php_event_handler);
764 event->added = added; 757 event->added = added;
@@ -778,7 +771,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
778 /* Allocate workqueue */ 771 /* Allocate workqueue */
779 php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); 772 php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
780 if (!php_slot->wq) { 773 if (!php_slot->wq) {
781 dev_warn(&pdev->dev, "Cannot alloc workqueue\n"); 774 pci_warn(pdev, "Cannot alloc workqueue\n");
782 pnv_php_disable_irq(php_slot, true); 775 pnv_php_disable_irq(php_slot, true);
783 return; 776 return;
784 } 777 }
@@ -802,7 +795,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
802 php_slot->name, php_slot); 795 php_slot->name, php_slot);
803 if (ret) { 796 if (ret) {
804 pnv_php_disable_irq(php_slot, true); 797 pnv_php_disable_irq(php_slot, true);
805 dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq); 798 pci_warn(pdev, "Error %d enabling IRQ %d\n", ret, irq);
806 return; 799 return;
807 } 800 }
808 801
@@ -838,7 +831,7 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
838 831
839 ret = pci_enable_device(pdev); 832 ret = pci_enable_device(pdev);
840 if (ret) { 833 if (ret) {
841 dev_warn(&pdev->dev, "Error %d enabling device\n", ret); 834 pci_warn(pdev, "Error %d enabling device\n", ret);
842 return; 835 return;
843 } 836 }
844 837
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 42d713578524..78b6bdbb3a39 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -242,18 +242,18 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
242 242
243 243
244 if (rc == PCI_SLOT_ALREADY_UP) { 244 if (rc == PCI_SLOT_ALREADY_UP) {
245 dev_dbg(&slot->pci_bus->self->dev, "is already active\n"); 245 pci_dbg(slot->pci_bus->self, "is already active\n");
246 return 1; /* return 1 to user */ 246 return 1; /* return 1 to user */
247 } 247 }
248 248
249 if (rc == PCI_L1_ERR) { 249 if (rc == PCI_L1_ERR) {
250 dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message: %s", 250 pci_dbg(slot->pci_bus->self, "L1 failure %d with message: %s",
251 resp.resp_sub_errno, resp.resp_l1_msg); 251 resp.resp_sub_errno, resp.resp_l1_msg);
252 return -EPERM; 252 return -EPERM;
253 } 253 }
254 254
255 if (rc) { 255 if (rc) {
256 dev_dbg(&slot->pci_bus->self->dev, "insert failed with error %d sub-error %d\n", 256 pci_dbg(slot->pci_bus->self, "insert failed with error %d sub-error %d\n",
257 rc, resp.resp_sub_errno); 257 rc, resp.resp_sub_errno);
258 return -EIO; 258 return -EIO;
259 } 259 }
@@ -278,23 +278,23 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
278 278
279 if ((action == PCI_REQ_SLOT_ELIGIBLE) && 279 if ((action == PCI_REQ_SLOT_ELIGIBLE) &&
280 (rc == PCI_SLOT_ALREADY_DOWN)) { 280 (rc == PCI_SLOT_ALREADY_DOWN)) {
281 dev_dbg(&slot->pci_bus->self->dev, "Slot %s already inactive\n", slot->physical_path); 281 pci_dbg(slot->pci_bus->self, "Slot %s already inactive\n", slot->physical_path);
282 return 1; /* return 1 to user */ 282 return 1; /* return 1 to user */
283 } 283 }
284 284
285 if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) { 285 if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
286 dev_dbg(&slot->pci_bus->self->dev, "Cannot remove last 33MHz card\n"); 286 pci_dbg(slot->pci_bus->self, "Cannot remove last 33MHz card\n");
287 return -EPERM; 287 return -EPERM;
288 } 288 }
289 289
290 if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) { 290 if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
291 dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message \n%s\n", 291 pci_dbg(slot->pci_bus->self, "L1 failure %d with message \n%s\n",
292 resp.resp_sub_errno, resp.resp_l1_msg); 292 resp.resp_sub_errno, resp.resp_l1_msg);
293 return -EPERM; 293 return -EPERM;
294 } 294 }
295 295
296 if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) { 296 if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
297 dev_dbg(&slot->pci_bus->self->dev, "remove failed with error %d sub-error %d\n", 297 pci_dbg(slot->pci_bus->self, "remove failed with error %d sub-error %d\n",
298 rc, resp.resp_sub_errno); 298 rc, resp.resp_sub_errno);
299 return -EIO; 299 return -EIO;
300 } 300 }
@@ -305,12 +305,12 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
305 if ((action == PCI_REQ_SLOT_DISABLE) && !rc) { 305 if ((action == PCI_REQ_SLOT_DISABLE) && !rc) {
306 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); 306 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
307 pcibus_info->pbi_enabled_devices &= ~(1 << device_num); 307 pcibus_info->pbi_enabled_devices &= ~(1 << device_num);
308 dev_dbg(&slot->pci_bus->self->dev, "remove successful\n"); 308 pci_dbg(slot->pci_bus->self, "remove successful\n");
309 return 0; 309 return 0;
310 } 310 }
311 311
312 if ((action == PCI_REQ_SLOT_DISABLE) && rc) { 312 if ((action == PCI_REQ_SLOT_DISABLE) && rc) {
313 dev_dbg(&slot->pci_bus->self->dev, "remove failed rc = %d\n", rc); 313 pci_dbg(slot->pci_bus->self, "remove failed rc = %d\n", rc);
314 } 314 }
315 315
316 return rc; 316 return rc;
@@ -363,7 +363,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
363 num_funcs = pci_scan_slot(slot->pci_bus, 363 num_funcs = pci_scan_slot(slot->pci_bus,
364 PCI_DEVFN(slot->device_num + 1, 0)); 364 PCI_DEVFN(slot->device_num + 1, 0));
365 if (!num_funcs) { 365 if (!num_funcs) {
366 dev_dbg(&slot->pci_bus->self->dev, "no device in slot\n"); 366 pci_dbg(slot->pci_bus->self, "no device in slot\n");
367 mutex_unlock(&sn_hotplug_mutex); 367 mutex_unlock(&sn_hotplug_mutex);
368 return -ENODEV; 368 return -ENODEV;
369 } 369 }
@@ -409,7 +409,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
409 phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); 409 phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
410 410
411 if (acpi_bus_get_device(phandle, &pdevice)) { 411 if (acpi_bus_get_device(phandle, &pdevice)) {
412 dev_dbg(&slot->pci_bus->self->dev, "no parent device, assuming NULL\n"); 412 pci_dbg(slot->pci_bus->self, "no parent device, assuming NULL\n");
413 pdevice = NULL; 413 pdevice = NULL;
414 } 414 }
415 415
@@ -460,9 +460,9 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
460 mutex_unlock(&sn_hotplug_mutex); 460 mutex_unlock(&sn_hotplug_mutex);
461 461
462 if (rc == 0) 462 if (rc == 0)
463 dev_dbg(&slot->pci_bus->self->dev, "insert operation successful\n"); 463 pci_dbg(slot->pci_bus->self, "insert operation successful\n");
464 else 464 else
465 dev_dbg(&slot->pci_bus->self->dev, "insert operation failed rc = %d\n", rc); 465 pci_dbg(slot->pci_bus->self, "insert operation failed rc = %d\n", rc);
466 466
467 return rc; 467 return rc;
468} 468}
@@ -640,16 +640,16 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
640 if (rc) 640 if (rc)
641 goto register_err; 641 goto register_err;
642 } 642 }
643 dev_dbg(&pci_bus->self->dev, "Registered bus with hotplug\n"); 643 pci_dbg(pci_bus->self, "Registered bus with hotplug\n");
644 return rc; 644 return rc;
645 645
646register_err: 646register_err:
647 dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n", 647 pci_dbg(pci_bus->self, "bus failed to register with err = %d\n",
648 rc); 648 rc);
649 649
650alloc_err: 650alloc_err:
651 if (rc == -ENOMEM) 651 if (rc == -ENOMEM)
652 dev_dbg(&pci_bus->self->dev, "Memory allocation error\n"); 652 pci_dbg(pci_bus->self, "Memory allocation error\n");
653 653
654 /* destroy THIS element */ 654 /* destroy THIS element */
655 if (bss_hotplug_slot) 655 if (bss_hotplug_slot)
@@ -682,10 +682,10 @@ static int __init sn_pci_hotplug_init(void)
682 682
683 rc = sn_pci_bus_valid(pci_bus); 683 rc = sn_pci_bus_valid(pci_bus);
684 if (rc != 1) { 684 if (rc != 1) {
685 dev_dbg(&pci_bus->self->dev, "not a valid hotplug bus\n"); 685 pci_dbg(pci_bus->self, "not a valid hotplug bus\n");
686 continue; 686 continue;
687 } 687 }
688 dev_dbg(&pci_bus->self->dev, "valid hotplug bus\n"); 688 pci_dbg(pci_bus->self, "valid hotplug bus\n");
689 689
690 rc = sn_hotplug_slot_register(pci_bus); 690 rc = sn_hotplug_slot_register(pci_bus);
691 if (!rc) { 691 if (!rc) {
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 98f867bd570f..c55730b61c9a 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -48,15 +48,15 @@ do { \
48#define ctrl_dbg(ctrl, format, arg...) \ 48#define ctrl_dbg(ctrl, format, arg...) \
49 do { \ 49 do { \
50 if (shpchp_debug) \ 50 if (shpchp_debug) \
51 dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \ 51 pci_printk(KERN_DEBUG, ctrl->pci_dev, \
52 format, ## arg); \ 52 format, ## arg); \
53 } while (0) 53 } while (0)
54#define ctrl_err(ctrl, format, arg...) \ 54#define ctrl_err(ctrl, format, arg...) \
55 dev_err(&ctrl->pci_dev->dev, format, ## arg) 55 pci_err(ctrl->pci_dev, format, ## arg)
56#define ctrl_info(ctrl, format, arg...) \ 56#define ctrl_info(ctrl, format, arg...) \
57 dev_info(&ctrl->pci_dev->dev, format, ## arg) 57 pci_info(ctrl->pci_dev, format, ## arg)
58#define ctrl_warn(ctrl, format, arg...) \ 58#define ctrl_warn(ctrl, format, arg...) \
59 dev_warn(&ctrl->pci_dev->dev, format, ## arg) 59 pci_warn(ctrl->pci_dev, format, ## arg)
60 60
61 61
62#define SLOT_NAME_SIZE 10 62#define SLOT_NAME_SIZE 10
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index f55ea1b68dff..1f0f96908b5a 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -291,10 +291,9 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
291 return -ENODEV; 291 return -ENODEV;
292 292
293 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 293 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
294 if (!ctrl) { 294 if (!ctrl)
295 dev_err(&pdev->dev, "%s: Out of memory\n", __func__);
296 goto err_out_none; 295 goto err_out_none;
297 } 296
298 INIT_LIST_HEAD(&ctrl->slot_list); 297 INIT_LIST_HEAD(&ctrl->slot_list);
299 298
300 rc = shpc_init(ctrl, pdev); 299 rc = shpc_init(ctrl, pdev);
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index c3d3919236be..115701301487 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -64,7 +64,6 @@ int shpchp_configure_device(struct slot *p_slot)
64int shpchp_unconfigure_device(struct slot *p_slot) 64int shpchp_unconfigure_device(struct slot *p_slot)
65{ 65{
66 int rc = 0; 66 int rc = 0;
67 u8 bctl = 0;
68 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 67 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
69 struct pci_dev *dev, *temp; 68 struct pci_dev *dev, *temp;
70 struct controller *ctrl = p_slot->ctrl; 69 struct controller *ctrl = p_slot->ctrl;
@@ -79,17 +78,6 @@ int shpchp_unconfigure_device(struct slot *p_slot)
79 continue; 78 continue;
80 79
81 pci_dev_get(dev); 80 pci_dev_get(dev);
82 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
83 pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
84 if (bctl & PCI_BRIDGE_CTL_VGA) {
85 ctrl_err(ctrl,
86 "Cannot remove display device %s\n",
87 pci_name(dev));
88 pci_dev_put(dev);
89 rc = -EINVAL;
90 break;
91 }
92 }
93 pci_stop_and_remove_bus_device(dev); 81 pci_stop_and_remove_bus_device(dev);
94 pci_dev_put(dev); 82 pci_dev_put(dev);
95 } 83 }
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c533325e8d86..ec69506bfde2 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -260,19 +260,19 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
260 nres++; 260 nres++;
261 } 261 }
262 if (nres != iov->nres) { 262 if (nres != iov->nres) {
263 dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n"); 263 pci_err(dev, "not enough MMIO resources for SR-IOV\n");
264 return -ENOMEM; 264 return -ENOMEM;
265 } 265 }
266 266
267 bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1); 267 bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
268 if (bus > dev->bus->busn_res.end) { 268 if (bus > dev->bus->busn_res.end) {
269 dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n", 269 pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
270 nr_virtfn, bus, &dev->bus->busn_res); 270 nr_virtfn, bus, &dev->bus->busn_res);
271 return -ENOMEM; 271 return -ENOMEM;
272 } 272 }
273 273
274 if (pci_enable_resources(dev, bars)) { 274 if (pci_enable_resources(dev, bars)) {
275 dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n"); 275 pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
276 return -ENOMEM; 276 return -ENOMEM;
277 } 277 }
278 278
@@ -299,7 +299,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
299 299
300 rc = pcibios_sriov_enable(dev, initial); 300 rc = pcibios_sriov_enable(dev, initial);
301 if (rc) { 301 if (rc) {
302 dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", rc); 302 pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
303 goto err_pcibios; 303 goto err_pcibios;
304 } 304 }
305 305
@@ -433,7 +433,7 @@ found:
433 } 433 }
434 iov->barsz[i] = resource_size(res); 434 iov->barsz[i] = resource_size(res);
435 res->end = res->start + resource_size(res) * total - 1; 435 res->end = res->start + resource_size(res) * total - 1;
436 dev_info(&dev->dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n", 436 pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
437 i, res, i, total); 437 i, res, i, total);
438 i += bar64; 438 i += bar64;
439 nres++; 439 nres++;
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 10929cd43d5d..2a808e10645f 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -16,11 +16,10 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
16{ 16{
17 struct pci_dev *parent = to_pci_dev(pdev->dev.parent); 17 struct pci_dev *parent = to_pci_dev(pdev->dev.parent);
18 18
19 dev_err(&pdev->dev, 19 pci_err(pdev, "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
20 "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
21 dev_name(&parent->dev), parent->vendor, parent->device); 20 dev_name(&parent->dev), parent->vendor, parent->device);
22 dev_err(&pdev->dev, "%s\n", reason); 21 pci_err(pdev, "%s\n", reason);
23 dev_err(&pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); 22 pci_err(pdev, "Please report to linux-kernel@vger.kernel.org\n");
24 WARN_ON(1); 23 WARN_ON(1);
25} 24}
26 25
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7f7547d200e4..8b0729c94bb7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -579,7 +579,7 @@ static int msi_verify_entries(struct pci_dev *dev)
579 for_each_pci_msi_entry(entry, dev) { 579 for_each_pci_msi_entry(entry, dev) {
580 if (!dev->no_64bit_msi || !entry->msg.address_hi) 580 if (!dev->no_64bit_msi || !entry->msg.address_hi)
581 continue; 581 continue;
582 dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" 582 pci_err(dev, "Device has broken 64-bit MSI but arch"
583 " tried to assign one above 4G\n"); 583 " tried to assign one above 4G\n");
584 return -EIO; 584 return -EIO;
585 } 585 }
@@ -963,7 +963,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
963 963
964 /* Check whether driver already requested for MSI irq */ 964 /* Check whether driver already requested for MSI irq */
965 if (dev->msi_enabled) { 965 if (dev->msi_enabled) {
966 dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); 966 pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
967 return -EINVAL; 967 return -EINVAL;
968 } 968 }
969 return msix_capability_init(dev, entries, nvec, affd); 969 return msix_capability_init(dev, entries, nvec, affd);
@@ -1033,8 +1033,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1033 1033
1034 /* Check whether driver already requested MSI-X irqs */ 1034 /* Check whether driver already requested MSI-X irqs */
1035 if (dev->msix_enabled) { 1035 if (dev->msix_enabled) {
1036 dev_info(&dev->dev, 1036 pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
1037 "can't enable MSI (MSI-X already enabled)\n");
1038 return -EINVAL; 1037 return -EINVAL;
1039 } 1038 }
1040 1039
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index fd721b8140e7..a28355c273ae 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -4,12 +4,14 @@
4 * 4 *
5 * Copyright 2011 IBM Corp. 5 * Copyright 2011 IBM Corp.
6 */ 6 */
7#define pr_fmt(fmt) "PCI: OF: " fmt
7 8
8#include <linux/irqdomain.h> 9#include <linux/irqdomain.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/pci.h> 11#include <linux/pci.h>
11#include <linux/of.h> 12#include <linux/of.h>
12#include <linux/of_irq.h> 13#include <linux/of_irq.h>
14#include <linux/of_address.h>
13#include <linux/of_pci.h> 15#include <linux/of_pci.h>
14#include "pci.h" 16#include "pci.h"
15 17
@@ -47,8 +49,9 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
47 if (WARN_ON(bus->self || bus->parent)) 49 if (WARN_ON(bus->self || bus->parent))
48 return NULL; 50 return NULL;
49 51
50 /* Look for a node pointer in either the intermediary device we 52 /*
51 * create above the root bus or it's own parent. Normally only 53 * Look for a node pointer in either the intermediary device we
54 * create above the root bus or its own parent. Normally only
52 * the later is populated. 55 * the later is populated.
53 */ 56 */
54 if (bus->bridge->of_node) 57 if (bus->bridge->of_node)
@@ -84,3 +87,561 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
84 return NULL; 87 return NULL;
85#endif 88#endif
86} 89}
90
/* Return non-zero when @node's "reg"-encoded devfn matches @data. */
static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn = of_pci_get_devfn(node);

	/* Nodes without a parseable devfn never match */
	return devfn < 0 ? 0 : devfn == data;
}
102
/**
 * of_pci_find_child_device() - Find the OF node for a given PCI devfn
 * @parent: OF node of the PCI bus to search under
 * @devfn: encoded device/function number to look for
 *
 * Walks the children of @parent comparing each node's "reg"-encoded devfn
 * against @devfn.  Also descends one level into "multifunc-device"
 * container nodes that some firmwares insert as a fake root for the
 * functions of a multi-function device.  Returns the matching node (the
 * iterator's reference is handed to the caller) or NULL if none matches.
 */
struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device we go down them as well.
		 */
		if (!strcmp(node->name, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					/* Drop the iterator's ref on the container */
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);
128
129/**
130 * of_pci_get_devfn() - Get device and function numbers for a device node
131 * @np: device node
132 *
133 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
134 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
135 * and function numbers respectively. On error a negative error code is
136 * returned.
137 */
138int of_pci_get_devfn(struct device_node *np)
139{
140 u32 reg[5];
141 int error;
142
143 error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
144 if (error)
145 return error;
146
147 return (reg[0] >> 8) & 0xff;
148}
149EXPORT_SYMBOL_GPL(of_pci_get_devfn);
150
151/**
152 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
153 * @node: device node
154 * @res: address to a struct resource to return the bus-range
155 *
156 * Returns 0 on success or a negative error-code on failure.
157 */
158int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
159{
160 u32 bus_range[2];
161 int error;
162
163 error = of_property_read_u32_array(node, "bus-range", bus_range,
164 ARRAY_SIZE(bus_range));
165 if (error)
166 return error;
167
168 res->name = node->name;
169 res->start = bus_range[0];
170 res->end = bus_range[1];
171 res->flags = IORESOURCE_BUS;
172
173 return 0;
174}
175EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
176
177/**
178 * This function will try to obtain the host bridge domain number by
179 * finding a property called "linux,pci-domain" of the given device node.
180 *
181 * @node: device tree node with the domain information
182 *
183 * Returns the associated domain number from DT in the range [0-0xffff], or
184 * a negative value if the required property is not found.
185 */
186int of_get_pci_domain_nr(struct device_node *node)
187{
188 u32 domain;
189 int error;
190
191 error = of_property_read_u32(node, "linux,pci-domain", &domain);
192 if (error)
193 return error;
194
195 return (u16)domain;
196}
197EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
198
199/**
200 * This function will try to find the limitation of link speed by finding
201 * a property called "max-link-speed" of the given device node.
202 *
203 * @node: device tree node with the max link speed information
204 *
205 * Returns the associated max link speed from DT, or a negative value if the
206 * required property is not found or is invalid.
207 */
208int of_pci_get_max_link_speed(struct device_node *node)
209{
210 u32 max_link_speed;
211
212 if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
213 max_link_speed > 4)
214 return -EINVAL;
215
216 return max_link_speed;
217}
218EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
219
220/**
221 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
222 * is present and valid
223 */
224void of_pci_check_probe_only(void)
225{
226 u32 val;
227 int ret;
228
229 ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
230 if (ret) {
231 if (ret == -ENODATA || ret == -EOVERFLOW)
232 pr_warn("linux,pci-probe-only without valid value, ignoring\n");
233 return;
234 }
235
236 if (val)
237 pci_add_flags(PCI_PROBE_ONLY);
238 else
239 pci_clear_flags(PCI_PROBE_ONLY);
240
241 pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
242}
243EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
244
#if defined(CONFIG_OF_ADDRESS)
/**
 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
 * @dev: device node of the host bridge having the range property
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 * address for the start of the I/O range. Can be NULL if the caller doesn't
 * expect I/O ranges to be present in the device tree.
 *
 * It is the caller's job to free the @resources list.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
int of_pci_get_host_bridge_resources(struct device_node *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources, resource_size_t *io_base)
{
	struct resource_entry *window;
	struct resource *res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	char range_type[4];	/* 3-char tag + NUL, for logging only */
	int err;

	/* Sentinel so callers can tell whether an I/O range was seen */
	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	pr_info("host bridge %pOF ranges:\n", dev);

	err = of_pci_parse_bus_range(dev, bus_range);
	if (err) {
		/* No "bus-range" in DT: fall back to the caller's bounds */
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		pr_info("  No bus range found for %pOF, using %pR\n",
			dev, bus_range);
	} else {
		/* Clamp the DT range to at most bus_max buses */
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	/* bus_range ownership passes to the resource list from here on */
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev);
	if (err)
		goto parse_failed;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			snprintf(range_type, 4, " IO");
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			snprintf(range_type, 4, "MEM");
		else
			snprintf(range_type, 4, "err");
		pr_info("  %s %#010llx..%#010llx -> %#010llx\n", range_type,
			range.cpu_addr, range.cpu_addr + range.size - 1,
			range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto parse_failed;
		}

		err = of_pci_range_to_resource(&range, dev, res);
		if (err) {
			/* Untranslatable range: drop it and carry on */
			kfree(res);
			continue;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev);
				err = -EINVAL;
				goto conversion_failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					dev);
			*io_base = range.cpu_addr;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	return 0;

conversion_failed:
	/* res was not added to the list yet, so free it separately */
	kfree(res);
parse_failed:
	/* Free every resource we added (including bus_range) plus the entries */
	resource_list_for_each_entry(window, resources)
		kfree(window->res);
	pci_free_resource_list(resources);
	return err;
}
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
364
/**
 * of_pci_map_rid - Translate a requester ID through a downstream mapping.
 * @np: root complex device node.
 * @rid: PCI requester ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a PCI requester ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_pci_map_rid(struct device_node *np, u32 rid,
		   const char *map_name, const char *map_mask_name,
		   struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = rid;
		return 0;
	}

	/* Each map entry is four cells: (rid-base, phandle, out-base, length) */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		/* An rid-base with bits outside the mask can never match */
		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/* Hold at most one reference: the node handed back */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			/* Caller pinned a specific target; skip other nodes */
			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
		np, map_name, rid, target && *target ? *target : NULL);
	return -EFAULT;
}
465
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev: the device whose interrupt is to be resolved
 * @out_irq: structure of_irq filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until an device-node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node, if yes, fallback to standard
	 * device tree parsing
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
		/* Parsing via the node failed; fall through to swizzling */
	}

	/*
	 * Ok, we don't, time to have fun. Let's start by building up an
	 * interrupt spec.  we assume #interrupt-cells is 1, which is standard
	 * for PCI. If you do different, then don't use that routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Now we walk up the PCI tree */
	for (;;) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for host bridge ? give up */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the linux device to be used for
		 * resolution. Note that we use the linux bus number which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (you ship a firmware that doesn't
		 * create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	/* One interrupt cell (the pin), plus the PCI unit address */
	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		/* Missing interrupt-map is common enough to warrant a softer warning */
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}
572
573/**
574 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
575 * @dev: The PCI device needing an IRQ
576 * @slot: PCI slot number; passed when used as map_irq callback. Unused
577 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
578 *
579 * @slot and @pin are unused, but included in the function so that this
580 * function can be used directly as the map_irq callback to
581 * pci_assign_irq() and struct pci_host_bridge.map_irq pointer
582 */
583int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
584{
585 struct of_phandle_args oirq;
586 int ret;
587
588 ret = of_irq_parse_pci(dev, &oirq);
589 if (ret)
590 return 0; /* Proper return code 0 == NO_IRQ */
591
592 return irq_create_of_mapping(&oirq);
593}
594EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
595#endif /* CONFIG_OF_IRQ */
596
/**
 * pci_parse_request_of_pci_ranges - Parse DT ranges and claim the resources
 * @dev: host bridge device whose of_node carries the "ranges" property
 * @resources: list head that receives the parsed resource entries
 * @bus_range: if non-NULL, set to the IORESOURCE_BUS resource that was parsed
 *
 * Parses the host bridge resources from @dev's OF node, requests them
 * against the PCI bus resources, and remaps any I/O range into the CPU's
 * I/O space (an unmappable I/O range is dropped with a warning, not
 * treated as fatal).  At least one non-prefetchable memory resource is
 * required.  On failure the partially-built @resources list is freed.
 *
 * Returns 0 on success or a negative error code.
 */
int pci_parse_request_of_pci_ranges(struct device *dev,
				    struct list_head *resources,
				    struct resource **bus_range)
{
	int err, res_valid = 0;
	struct device_node *np = dev->of_node;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(resources);
	err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, resources);
	if (err)
		goto out_release_res;

	/* _safe iteration: unmappable I/O entries are destroyed in the loop */
	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			/* Track that at least one non-prefetchable window exists */
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			if (bus_range)
				*bus_range = res;
			break;
		}
	}

	if (res_valid)
		return 0;

	dev_err(dev, "non-prefetchable memory resource required\n");
	err = -EINVAL;

 out_release_res:
	pci_free_resource_list(resources);
	return err;
}
647
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3b82bb86a203..78157688dcc9 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -543,7 +543,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
543 } 543 }
544 544
545 if (!error) 545 if (!error)
546 dev_dbg(&dev->dev, "power state changed by ACPI to %s\n", 546 pci_dbg(dev, "power state changed by ACPI to %s\n",
547 acpi_power_state_string(state_conv[state])); 547 acpi_power_state_string(state_conv[state]));
548 548
549 return error; 549 return error;
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index a952007b7608..10d54f939048 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -27,7 +27,7 @@ MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
27 27
28static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) 28static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
29{ 29{
30 dev_info(&dev->dev, "claimed by stub\n"); 30 pci_info(dev, "claimed by stub\n");
31 return 0; 31 return 0;
32} 32}
33 33
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 06c7f0b85cd2..eb6bee8724cc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -278,6 +278,16 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
278} 278}
279static DEVICE_ATTR_RO(subordinate_bus_number); 279static DEVICE_ATTR_RO(subordinate_bus_number);
280 280
281static ssize_t ari_enabled_show(struct device *dev,
282 struct device_attribute *attr,
283 char *buf)
284{
285 struct pci_dev *pci_dev = to_pci_dev(dev);
286
287 return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
288}
289static DEVICE_ATTR_RO(ari_enabled);
290
281static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 291static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
282 char *buf) 292 char *buf)
283{ 293{
@@ -348,7 +358,7 @@ static ssize_t numa_node_store(struct device *dev,
348 return -EINVAL; 358 return -EINVAL;
349 359
350 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 360 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
351 dev_alert(&pdev->dev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.", 361 pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
352 node); 362 node);
353 363
354 dev->numa_node = node; 364 dev->numa_node = node;
@@ -411,7 +421,7 @@ static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
411 */ 421 */
412 if (!subordinate) { 422 if (!subordinate) {
413 pdev->no_msi = !val; 423 pdev->no_msi = !val;
414 dev_info(&pdev->dev, "MSI/MSI-X %s for future drivers\n", 424 pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
415 val ? "allowed" : "disallowed"); 425 val ? "allowed" : "disallowed");
416 return count; 426 return count;
417 } 427 }
@@ -613,7 +623,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
613 623
614 /* is PF driver loaded w/callback */ 624 /* is PF driver loaded w/callback */
615 if (!pdev->driver || !pdev->driver->sriov_configure) { 625 if (!pdev->driver || !pdev->driver->sriov_configure) {
616 dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n"); 626 pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n");
617 ret = -ENOENT; 627 ret = -ENOENT;
618 goto exit; 628 goto exit;
619 } 629 }
@@ -626,7 +636,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
626 636
627 /* enable VFs */ 637 /* enable VFs */
628 if (pdev->sriov->num_VFs) { 638 if (pdev->sriov->num_VFs) {
629 dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n", 639 pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
630 pdev->sriov->num_VFs, num_vfs); 640 pdev->sriov->num_VFs, num_vfs);
631 ret = -EBUSY; 641 ret = -EBUSY;
632 goto exit; 642 goto exit;
@@ -637,7 +647,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
637 goto exit; 647 goto exit;
638 648
639 if (ret != num_vfs) 649 if (ret != num_vfs)
640 dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n", 650 pci_warn(pdev, "%d VFs requested; only %d enabled\n",
641 num_vfs, ret); 651 num_vfs, ret);
642 652
643exit: 653exit:
@@ -786,6 +796,7 @@ static struct attribute *pci_dev_attrs[] = {
786 &dev_attr_devspec.attr, 796 &dev_attr_devspec.attr,
787#endif 797#endif
788 &dev_attr_driver_override.attr, 798 &dev_attr_driver_override.attr,
799 &dev_attr_ari_enabled.attr,
789 NULL, 800 NULL,
790}; 801};
791 802
@@ -1216,14 +1227,9 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
1216 if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) 1227 if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
1217 return -EINVAL; 1228 return -EINVAL;
1218 1229
1219 if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) { 1230 if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
1220 WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
1221 current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
1222 pci_name(pdev), bar,
1223 (u64)pci_resource_start(pdev, bar),
1224 (u64)pci_resource_len(pdev, bar));
1225 return -EINVAL; 1231 return -EINVAL;
1226 } 1232
1227 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; 1233 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
1228 1234
1229 return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine); 1235 return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 50e716b3e2b8..006814babdce 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -157,7 +157,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
157 * Make sure the BAR is actually a memory resource, not an IO resource 157 * Make sure the BAR is actually a memory resource, not an IO resource
158 */ 158 */
159 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) { 159 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
160 dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res); 160 pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
161 return NULL; 161 return NULL;
162 } 162 }
163 return ioremap_nocache(res->start, resource_size(res)); 163 return ioremap_nocache(res->start, resource_size(res));
@@ -649,7 +649,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
649 */ 649 */
650 if (state != PCI_D0 && dev->current_state <= PCI_D3cold 650 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
651 && dev->current_state > state) { 651 && dev->current_state > state) {
652 dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n", 652 pci_err(dev, "invalid power transition (from state %d to %d)\n",
653 dev->current_state, state); 653 dev->current_state, state);
654 return -EINVAL; 654 return -EINVAL;
655 } 655 }
@@ -697,7 +697,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
697 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 697 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
698 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 698 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
699 if (dev->current_state != state && printk_ratelimit()) 699 if (dev->current_state != state && printk_ratelimit())
700 dev_info(&dev->dev, "Refused to change power state, currently in D%d\n", 700 pci_info(dev, "Refused to change power state, currently in D%d\n",
701 dev->current_state); 701 dev->current_state);
702 702
703 /* 703 /*
@@ -971,7 +971,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
971 case PM_EVENT_HIBERNATE: 971 case PM_EVENT_HIBERNATE:
972 return PCI_D3hot; 972 return PCI_D3hot;
973 default: 973 default:
974 dev_info(&dev->dev, "unrecognized suspend event %d\n", 974 pci_info(dev, "unrecognized suspend event %d\n",
975 state.event); 975 state.event);
976 BUG(); 976 BUG();
977 } 977 }
@@ -1014,7 +1014,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
1014 1014
1015 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 1015 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1016 if (!save_state) { 1016 if (!save_state) {
1017 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 1017 pci_err(dev, "buffer not found in %s\n", __func__);
1018 return -ENOMEM; 1018 return -ENOMEM;
1019 } 1019 }
1020 1020
@@ -1062,7 +1062,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
1062 1062
1063 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 1063 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1064 if (!save_state) { 1064 if (!save_state) {
1065 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 1065 pci_err(dev, "buffer not found in %s\n", __func__);
1066 return -ENOMEM; 1066 return -ENOMEM;
1067 } 1067 }
1068 1068
@@ -1122,7 +1122,7 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1122 return; 1122 return;
1123 1123
1124 for (;;) { 1124 for (;;) {
1125 dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n", 1125 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1126 offset, val, saved_val); 1126 offset, val, saved_val);
1127 pci_write_config_dword(pdev, offset, saved_val); 1127 pci_write_config_dword(pdev, offset, saved_val);
1128 if (retry-- <= 0) 1128 if (retry-- <= 0)
@@ -1359,7 +1359,7 @@ static void pci_enable_bridge(struct pci_dev *dev)
1359 1359
1360 retval = pci_enable_device(dev); 1360 retval = pci_enable_device(dev);
1361 if (retval) 1361 if (retval)
1362 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", 1362 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1363 retval); 1363 retval);
1364 pci_set_master(dev); 1364 pci_set_master(dev);
1365} 1365}
@@ -1864,7 +1864,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1864 pme_dev = kmalloc(sizeof(struct pci_pme_device), 1864 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1865 GFP_KERNEL); 1865 GFP_KERNEL);
1866 if (!pme_dev) { 1866 if (!pme_dev) {
1867 dev_warn(&dev->dev, "can't enable PME#\n"); 1867 pci_warn(dev, "can't enable PME#\n");
1868 return; 1868 return;
1869 } 1869 }
1870 pme_dev->dev = dev; 1870 pme_dev->dev = dev;
@@ -1888,7 +1888,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1888 } 1888 }
1889 } 1889 }
1890 1890
1891 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled"); 1891 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
1892} 1892}
1893EXPORT_SYMBOL(pci_pme_active); 1893EXPORT_SYMBOL(pci_pme_active);
1894 1894
@@ -2425,7 +2425,7 @@ void pci_pm_init(struct pci_dev *dev)
2425 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 2425 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2426 2426
2427 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 2427 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2428 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", 2428 pci_err(dev, "unsupported PM cap regs version (%u)\n",
2429 pmc & PCI_PM_CAP_VER_MASK); 2429 pmc & PCI_PM_CAP_VER_MASK);
2430 return; 2430 return;
2431 } 2431 }
@@ -2445,15 +2445,14 @@ void pci_pm_init(struct pci_dev *dev)
2445 dev->d2_support = true; 2445 dev->d2_support = true;
2446 2446
2447 if (dev->d1_support || dev->d2_support) 2447 if (dev->d1_support || dev->d2_support)
2448 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", 2448 pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
2449 dev->d1_support ? " D1" : "", 2449 dev->d1_support ? " D1" : "",
2450 dev->d2_support ? " D2" : ""); 2450 dev->d2_support ? " D2" : "");
2451 } 2451 }
2452 2452
2453 pmc &= PCI_PM_CAP_PME_MASK; 2453 pmc &= PCI_PM_CAP_PME_MASK;
2454 if (pmc) { 2454 if (pmc) {
2455 dev_printk(KERN_DEBUG, &dev->dev, 2455 pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
2456 "PME# supported from%s%s%s%s%s\n",
2457 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 2456 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2458 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 2457 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2459 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 2458 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -2545,13 +2544,13 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
2545 2544
2546 res = pci_ea_get_resource(dev, bei, prop); 2545 res = pci_ea_get_resource(dev, bei, prop);
2547 if (!res) { 2546 if (!res) {
2548 dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei); 2547 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2549 goto out; 2548 goto out;
2550 } 2549 }
2551 2550
2552 flags = pci_ea_flags(dev, prop); 2551 flags = pci_ea_flags(dev, prop);
2553 if (!flags) { 2552 if (!flags) {
2554 dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop); 2553 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2555 goto out; 2554 goto out;
2556 } 2555 }
2557 2556
@@ -2601,13 +2600,12 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
2601 } 2600 }
2602 2601
2603 if (end < start) { 2602 if (end < start) {
2604 dev_err(&dev->dev, "EA Entry crosses address boundary\n"); 2603 pci_err(dev, "EA Entry crosses address boundary\n");
2605 goto out; 2604 goto out;
2606 } 2605 }
2607 2606
2608 if (ent_size != ent_offset - offset) { 2607 if (ent_size != ent_offset - offset) {
2609 dev_err(&dev->dev, 2608 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2610 "EA Entry Size (%d) does not match length read (%d)\n",
2611 ent_size, ent_offset - offset); 2609 ent_size, ent_offset - offset);
2612 goto out; 2610 goto out;
2613 } 2611 }
@@ -2618,16 +2616,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
2618 res->flags = flags; 2616 res->flags = flags;
2619 2617
2620 if (bei <= PCI_EA_BEI_BAR5) 2618 if (bei <= PCI_EA_BEI_BAR5)
2621 dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", 2619 pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2622 bei, res, prop); 2620 bei, res, prop);
2623 else if (bei == PCI_EA_BEI_ROM) 2621 else if (bei == PCI_EA_BEI_ROM)
2624 dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", 2622 pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2625 res, prop); 2623 res, prop);
2626 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) 2624 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2627 dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", 2625 pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2628 bei - PCI_EA_BEI_VF_BAR0, res, prop); 2626 bei - PCI_EA_BEI_VF_BAR0, res, prop);
2629 else 2627 else
2630 dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", 2628 pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2631 bei, res, prop); 2629 bei, res, prop);
2632 2630
2633out: 2631out:
@@ -2724,13 +2722,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2724 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 2722 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2725 PCI_EXP_SAVE_REGS * sizeof(u16)); 2723 PCI_EXP_SAVE_REGS * sizeof(u16));
2726 if (error) 2724 if (error)
2727 dev_err(&dev->dev, 2725 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
2728 "unable to preallocate PCI Express save buffer\n");
2729 2726
2730 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 2727 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2731 if (error) 2728 if (error)
2732 dev_err(&dev->dev, 2729 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
2733 "unable to preallocate PCI-X save buffer\n");
2734 2730
2735 pci_allocate_vc_save_buffers(dev); 2731 pci_allocate_vc_save_buffers(dev);
2736} 2732}
@@ -3067,6 +3063,81 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3067} 3063}
3068 3064
3069/** 3065/**
3066 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3067 * @dev: the PCI device
3068 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3069 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3070 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3071 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3072 *
3073 * Return 0 if all upstream bridges support AtomicOp routing, egress
3074 * blocking is disabled on all upstream ports, and the root port supports
3075 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3076 * AtomicOp completion), or negative otherwise.
3077 */
3078int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3079{
3080 struct pci_bus *bus = dev->bus;
3081 struct pci_dev *bridge;
3082 u32 cap, ctl2;
3083
3084 if (!pci_is_pcie(dev))
3085 return -EINVAL;
3086
3087 /*
3088 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3089 * AtomicOp requesters. For now, we only support endpoints as
3090 * requesters and root ports as completers. No endpoints as
3091 * completers, and no peer-to-peer.
3092 */
3093
3094 switch (pci_pcie_type(dev)) {
3095 case PCI_EXP_TYPE_ENDPOINT:
3096 case PCI_EXP_TYPE_LEG_END:
3097 case PCI_EXP_TYPE_RC_END:
3098 break;
3099 default:
3100 return -EINVAL;
3101 }
3102
3103 while (bus->parent) {
3104 bridge = bus->self;
3105
3106 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3107
3108 switch (pci_pcie_type(bridge)) {
3109 /* Ensure switch ports support AtomicOp routing */
3110 case PCI_EXP_TYPE_UPSTREAM:
3111 case PCI_EXP_TYPE_DOWNSTREAM:
3112 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3113 return -EINVAL;
3114 break;
3115
3116 /* Ensure root port supports all the sizes we care about */
3117 case PCI_EXP_TYPE_ROOT_PORT:
3118 if ((cap & cap_mask) != cap_mask)
3119 return -EINVAL;
3120 break;
3121 }
3122
3123 /* Ensure upstream ports don't block AtomicOps on egress */
3124 if (!bridge->has_secondary_link) {
3125 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3126 &ctl2);
3127 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3128 return -EINVAL;
3129 }
3130
3131 bus = bus->parent;
3132 }
3133
3134 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3135 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3136 return 0;
3137}
3138EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3139
3140/**
3070 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 3141 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3071 * @dev: the PCI device 3142 * @dev: the PCI device
3072 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) 3143 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
@@ -3199,7 +3270,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
3199 return 0; 3270 return 0;
3200 3271
3201err_out: 3272err_out:
3202 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, 3273 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3203 &pdev->resource[bar]); 3274 &pdev->resource[bar]);
3204 return -EBUSY; 3275 return -EBUSY;
3205} 3276}
@@ -3622,7 +3693,7 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
3622 else 3693 else
3623 cmd = old_cmd & ~PCI_COMMAND_MASTER; 3694 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3624 if (cmd != old_cmd) { 3695 if (cmd != old_cmd) {
3625 dev_dbg(&dev->dev, "%s bus mastering\n", 3696 pci_dbg(dev, "%s bus mastering\n",
3626 enable ? "enabling" : "disabling"); 3697 enable ? "enabling" : "disabling");
3627 pci_write_config_word(dev, PCI_COMMAND, cmd); 3698 pci_write_config_word(dev, PCI_COMMAND, cmd);
3628 } 3699 }
@@ -3723,7 +3794,7 @@ int pci_set_cacheline_size(struct pci_dev *dev)
3723 if (cacheline_size == pci_cache_line_size) 3794 if (cacheline_size == pci_cache_line_size)
3724 return 0; 3795 return 0;
3725 3796
3726 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n", 3797 pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
3727 pci_cache_line_size << 2); 3798 pci_cache_line_size << 2);
3728 3799
3729 return -EINVAL; 3800 return -EINVAL;
@@ -3752,7 +3823,7 @@ int pci_set_mwi(struct pci_dev *dev)
3752 3823
3753 pci_read_config_word(dev, PCI_COMMAND, &cmd); 3824 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3754 if (!(cmd & PCI_COMMAND_INVALIDATE)) { 3825 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3755 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); 3826 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
3756 cmd |= PCI_COMMAND_INVALIDATE; 3827 cmd |= PCI_COMMAND_INVALIDATE;
3757 pci_write_config_word(dev, PCI_COMMAND, cmd); 3828 pci_write_config_word(dev, PCI_COMMAND, cmd);
3758 } 3829 }
@@ -3948,13 +4019,13 @@ static void pci_flr_wait(struct pci_dev *dev)
3948 pci_read_config_dword(dev, PCI_COMMAND, &id); 4019 pci_read_config_dword(dev, PCI_COMMAND, &id);
3949 while (id == ~0) { 4020 while (id == ~0) {
3950 if (delay > timeout) { 4021 if (delay > timeout) {
3951 dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n", 4022 pci_warn(dev, "not ready %dms after FLR; giving up\n",
3952 100 + delay - 1); 4023 100 + delay - 1);
3953 return; 4024 return;
3954 } 4025 }
3955 4026
3956 if (delay > 1000) 4027 if (delay > 1000)
3957 dev_info(&dev->dev, "not ready %dms after FLR; waiting\n", 4028 pci_info(dev, "not ready %dms after FLR; waiting\n",
3958 100 + delay - 1); 4029 100 + delay - 1);
3959 4030
3960 msleep(delay); 4031 msleep(delay);
@@ -3963,7 +4034,7 @@ static void pci_flr_wait(struct pci_dev *dev)
3963 } 4034 }
3964 4035
3965 if (delay > 1000) 4036 if (delay > 1000)
3966 dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1); 4037 pci_info(dev, "ready %dms after FLR\n", 100 + delay - 1);
3967} 4038}
3968 4039
3969/** 4040/**
@@ -3995,7 +4066,7 @@ static bool pcie_has_flr(struct pci_dev *dev)
3995void pcie_flr(struct pci_dev *dev) 4066void pcie_flr(struct pci_dev *dev)
3996{ 4067{
3997 if (!pci_wait_for_pending_transaction(dev)) 4068 if (!pci_wait_for_pending_transaction(dev))
3998 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); 4069 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3999 4070
4000 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 4071 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4001 pci_flr_wait(dev); 4072 pci_flr_wait(dev);
@@ -4028,7 +4099,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
4028 */ 4099 */
4029 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, 4100 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4030 PCI_AF_STATUS_TP << 8)) 4101 PCI_AF_STATUS_TP << 8))
4031 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); 4102 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4032 4103
4033 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 4104 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4034 pci_flr_wait(dev); 4105 pci_flr_wait(dev);
@@ -5151,12 +5222,12 @@ void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5151 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX), 5222 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5152 sizeof(long), GFP_KERNEL); 5223 sizeof(long), GFP_KERNEL);
5153 if (!dev->dma_alias_mask) { 5224 if (!dev->dma_alias_mask) {
5154 dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n"); 5225 pci_warn(dev, "Unable to allocate DMA alias mask\n");
5155 return; 5226 return;
5156 } 5227 }
5157 5228
5158 set_bit(devfn, dev->dma_alias_mask); 5229 set_bit(devfn, dev->dma_alias_mask);
5159 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n", 5230 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
5160 PCI_SLOT(devfn), PCI_FUNC(devfn)); 5231 PCI_SLOT(devfn), PCI_FUNC(devfn));
5161} 5232}
5162 5233
@@ -5305,7 +5376,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5305 return; 5376 return;
5306 5377
5307 if (r->flags & IORESOURCE_PCI_FIXED) { 5378 if (r->flags & IORESOURCE_PCI_FIXED) {
5308 dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n", 5379 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5309 bar, r, (unsigned long long)align); 5380 bar, r, (unsigned long long)align);
5310 return; 5381 return;
5311 } 5382 }
@@ -5342,7 +5413,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5342 * devices and we use the second. 5413 * devices and we use the second.
5343 */ 5414 */
5344 5415
5345 dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n", 5416 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
5346 bar, r, (unsigned long long)align); 5417 bar, r, (unsigned long long)align);
5347 5418
5348 if (resize) { 5419 if (resize) {
@@ -5388,13 +5459,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5388 5459
5389 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && 5460 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5390 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { 5461 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5391 dev_warn(&dev->dev, 5462 pci_warn(dev, "Can't reassign resources to host bridge\n");
5392 "Can't reassign resources to host bridge.\n");
5393 return; 5463 return;
5394 } 5464 }
5395 5465
5396 dev_info(&dev->dev, 5466 pci_info(dev, "Disabling memory decoding and releasing memory resources\n");
5397 "Disabling memory decoding and releasing memory resources.\n");
5398 pci_read_config_word(dev, PCI_COMMAND, &command); 5467 pci_read_config_word(dev, PCI_COMMAND, &command);
5399 command &= ~PCI_COMMAND_MEMORY; 5468 command &= ~PCI_COMMAND_MEMORY;
5400 pci_write_config_word(dev, PCI_COMMAND, command); 5469 pci_write_config_word(dev, PCI_COMMAND, command);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f6b58b32a67c..fcd81911b127 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -38,21 +38,21 @@ int pci_probe_reset_function(struct pci_dev *dev);
38 * struct pci_platform_pm_ops - Firmware PM callbacks 38 * struct pci_platform_pm_ops - Firmware PM callbacks
39 * 39 *
40 * @is_manageable: returns 'true' if given device is power manageable by the 40 * @is_manageable: returns 'true' if given device is power manageable by the
41 * platform firmware 41 * platform firmware
42 * 42 *
43 * @set_state: invokes the platform firmware to set the device's power state 43 * @set_state: invokes the platform firmware to set the device's power state
44 * 44 *
45 * @get_state: queries the platform firmware for a device's current power state 45 * @get_state: queries the platform firmware for a device's current power state
46 * 46 *
47 * @choose_state: returns PCI power state of given device preferred by the 47 * @choose_state: returns PCI power state of given device preferred by the
48 * platform; to be used during system-wide transitions from a 48 * platform; to be used during system-wide transitions from a
49 * sleeping state to the working state and vice versa 49 * sleeping state to the working state and vice versa
50 * 50 *
51 * @set_wakeup: enables/disables wakeup capability for the device 51 * @set_wakeup: enables/disables wakeup capability for the device
52 * 52 *
53 * @need_resume: returns 'true' if the given device (which is currently 53 * @need_resume: returns 'true' if the given device (which is currently
54 * suspended) needs to be resumed to be configured for system 54 * suspended) needs to be resumed to be configured for system
55 * wakeup. 55 * wakeup.
56 * 56 *
57 * If given platform is generally capable of power managing PCI devices, all of 57 * If given platform is generally capable of power managing PCI devices, all of
58 * these callbacks are mandatory. 58 * these callbacks are mandatory.
@@ -112,7 +112,7 @@ struct pci_vpd_ops {
112 112
113struct pci_vpd { 113struct pci_vpd {
114 const struct pci_vpd_ops *ops; 114 const struct pci_vpd_ops *ops;
115 struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ 115 struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */
116 struct mutex lock; 116 struct mutex lock;
117 unsigned int len; 117 unsigned int len;
118 u16 flag; 118 u16 flag;
@@ -199,7 +199,7 @@ extern const struct attribute_group *pci_bus_groups[];
199 199
200/** 200/**
201 * pci_match_one_device - Tell if a PCI device structure has a matching 201 * pci_match_one_device - Tell if a PCI device structure has a matching
202 * PCI device id structure 202 * PCI device id structure
203 * @id: single PCI device id structure to match 203 * @id: single PCI device id structure to match
204 * @dev: the PCI device structure to match against 204 * @dev: the PCI device structure to match against
205 * 205 *
@@ -231,7 +231,7 @@ struct pci_slot_attribute {
231 231
232enum pci_bar_type { 232enum pci_bar_type {
233 pci_bar_unknown, /* Standard PCI BAR probe */ 233 pci_bar_unknown, /* Standard PCI BAR probe */
234 pci_bar_io, /* An io port BAR */ 234 pci_bar_io, /* An I/O port BAR */
235 pci_bar_mem32, /* A 32-bit memory BAR */ 235 pci_bar_mem32, /* A 32-bit memory BAR */
236 pci_bar_mem64, /* A 64-bit memory BAR */ 236 pci_bar_mem64, /* A 64-bit memory BAR */
237}; 237};
@@ -255,24 +255,24 @@ void pci_disable_bridge_window(struct pci_dev *dev);
255 255
256/* Single Root I/O Virtualization */ 256/* Single Root I/O Virtualization */
257struct pci_sriov { 257struct pci_sriov {
258 int pos; /* capability position */ 258 int pos; /* Capability position */
259 int nres; /* number of resources */ 259 int nres; /* Number of resources */
260 u32 cap; /* SR-IOV Capabilities */ 260 u32 cap; /* SR-IOV Capabilities */
261 u16 ctrl; /* SR-IOV Control */ 261 u16 ctrl; /* SR-IOV Control */
262 u16 total_VFs; /* total VFs associated with the PF */ 262 u16 total_VFs; /* Total VFs associated with the PF */
263 u16 initial_VFs; /* initial VFs associated with the PF */ 263 u16 initial_VFs; /* Initial VFs associated with the PF */
264 u16 num_VFs; /* number of VFs available */ 264 u16 num_VFs; /* Number of VFs available */
265 u16 offset; /* first VF Routing ID offset */ 265 u16 offset; /* First VF Routing ID offset */
266 u16 stride; /* following VF stride */ 266 u16 stride; /* Following VF stride */
267 u16 vf_device; /* VF device ID */ 267 u16 vf_device; /* VF device ID */
268 u32 pgsz; /* page size for BAR alignment */ 268 u32 pgsz; /* Page size for BAR alignment */
269 u8 link; /* Function Dependency Link */ 269 u8 link; /* Function Dependency Link */
270 u8 max_VF_buses; /* max buses consumed by VFs */ 270 u8 max_VF_buses; /* Max buses consumed by VFs */
271 u16 driver_max_VFs; /* max num VFs driver supports */ 271 u16 driver_max_VFs; /* Max num VFs driver supports */
272 struct pci_dev *dev; /* lowest numbered PF */ 272 struct pci_dev *dev; /* Lowest numbered PF */
273 struct pci_dev *self; /* this PF */ 273 struct pci_dev *self; /* This PF */
274 resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ 274 resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
275 bool drivers_autoprobe; /* auto probing of VFs by driver */ 275 bool drivers_autoprobe; /* Auto probing of VFs by driver */
276}; 276};
277 277
278/* pci_dev priv_flags */ 278/* pci_dev priv_flags */
@@ -335,13 +335,33 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
335 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) 335 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
336 return pci_sriov_resource_alignment(dev, resno); 336 return pci_sriov_resource_alignment(dev, resno);
337#endif 337#endif
338 if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS) 338 if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
339 return pci_cardbus_resource_alignment(res); 339 return pci_cardbus_resource_alignment(res);
340 return resource_alignment(res); 340 return resource_alignment(res);
341} 341}
342 342
343void pci_enable_acs(struct pci_dev *dev); 343void pci_enable_acs(struct pci_dev *dev);
344 344
345#ifdef CONFIG_PCIEASPM
346void pcie_aspm_init_link_state(struct pci_dev *pdev);
347void pcie_aspm_exit_link_state(struct pci_dev *pdev);
348void pcie_aspm_pm_state_change(struct pci_dev *pdev);
349void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
350#else
351static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
352static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
353static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
354static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
355#endif
356
357#ifdef CONFIG_PCIEASPM_DEBUG
358void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
359void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
360#else
361static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { }
362static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { }
363#endif
364
345#ifdef CONFIG_PCIE_PTM 365#ifdef CONFIG_PCIE_PTM
346void pci_ptm_init(struct pci_dev *dev); 366void pci_ptm_init(struct pci_dev *dev);
347#else 367#else
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 6d75a2eb6ecb..b12e28b3d8f9 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -93,7 +93,7 @@ config PCIE_PME
93 93
94config PCIE_DPC 94config PCIE_DPC
95 bool "PCIe Downstream Port Containment support" 95 bool "PCIe Downstream Port Containment support"
96 depends on PCIEPORTBUS 96 depends on PCIEPORTBUS && PCIEAER
97 default n 97 default n
98 help 98 help
99 This enables PCI Express Downstream Port Containment (DPC) 99 This enables PCI Express Downstream Port Containment (DPC)
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 7e2235484dab..25e1feb962c5 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -339,14 +339,14 @@ static int aer_inject(struct aer_error_inj *einj)
339 return -ENODEV; 339 return -ENODEV;
340 rpdev = pcie_find_root_port(dev); 340 rpdev = pcie_find_root_port(dev);
341 if (!rpdev) { 341 if (!rpdev) {
342 dev_err(&dev->dev, "aer_inject: Root port not found\n"); 342 pci_err(dev, "aer_inject: Root port not found\n");
343 ret = -ENODEV; 343 ret = -ENODEV;
344 goto out_put; 344 goto out_put;
345 } 345 }
346 346
347 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 347 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
348 if (!pos_cap_err) { 348 if (!pos_cap_err) {
349 dev_err(&dev->dev, "aer_inject: Device doesn't support AER\n"); 349 pci_err(dev, "aer_inject: Device doesn't support AER\n");
350 ret = -EPROTONOSUPPORT; 350 ret = -EPROTONOSUPPORT;
351 goto out_put; 351 goto out_put;
352 } 352 }
@@ -357,8 +357,7 @@ static int aer_inject(struct aer_error_inj *einj)
357 357
358 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); 358 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
359 if (!rp_pos_cap_err) { 359 if (!rp_pos_cap_err) {
360 dev_err(&rpdev->dev, 360 pci_err(rpdev, "aer_inject: Root port doesn't support AER\n");
361 "aer_inject: Root port doesn't support AER\n");
362 ret = -EPROTONOSUPPORT; 361 ret = -EPROTONOSUPPORT;
363 goto out_put; 362 goto out_put;
364 } 363 }
@@ -406,16 +405,14 @@ static int aer_inject(struct aer_error_inj *einj)
406 if (!aer_mask_override && einj->cor_status && 405 if (!aer_mask_override && einj->cor_status &&
407 !(einj->cor_status & ~cor_mask)) { 406 !(einj->cor_status & ~cor_mask)) {
408 ret = -EINVAL; 407 ret = -EINVAL;
409 dev_warn(&dev->dev, 408 pci_warn(dev, "aer_inject: The correctable error(s) is masked by device\n");
410 "aer_inject: The correctable error(s) is masked by device\n");
411 spin_unlock_irqrestore(&inject_lock, flags); 409 spin_unlock_irqrestore(&inject_lock, flags);
412 goto out_put; 410 goto out_put;
413 } 411 }
414 if (!aer_mask_override && einj->uncor_status && 412 if (!aer_mask_override && einj->uncor_status &&
415 !(einj->uncor_status & ~uncor_mask)) { 413 !(einj->uncor_status & ~uncor_mask)) {
416 ret = -EINVAL; 414 ret = -EINVAL;
417 dev_warn(&dev->dev, 415 pci_warn(dev, "aer_inject: The uncorrectable error(s) is masked by device\n");
418 "aer_inject: The uncorrectable error(s) is masked by device\n");
419 spin_unlock_irqrestore(&inject_lock, flags); 416 spin_unlock_irqrestore(&inject_lock, flags);
420 goto out_put; 417 goto out_put;
421 } 418 }
@@ -478,7 +475,7 @@ static int aer_inject(struct aer_error_inj *einj)
478 einj->cor_status, einj->uncor_status, pci_name(dev)); 475 einj->cor_status, einj->uncor_status, pci_name(dev));
479 aer_irq(-1, edev); 476 aer_irq(-1, edev);
480 } else { 477 } else {
481 dev_err(&rpdev->dev, "aer_inject: AER device not found\n"); 478 pci_err(rpdev, "aer_inject: AER device not found\n");
482 ret = -ENODEV; 479 ret = -ENODEV;
483 } 480 }
484out_put: 481out_put:
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 3f8c58f897f0..da8331f5684d 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -323,7 +323,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
323 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); 323 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
324 324
325 pci_reset_bridge_secondary_bus(dev); 325 pci_reset_bridge_secondary_bus(dev);
326 dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); 326 pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
327 327
328 /* Clear Root Error Status */ 328 /* Clear Root Error Status */
329 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32); 329 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index df45383c35f7..109d43fab40e 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -223,9 +223,8 @@ static bool find_source_device(struct pci_dev *parent,
223 pci_walk_bus(parent->subordinate, find_device_iter, e_info); 223 pci_walk_bus(parent->subordinate, find_device_iter, e_info);
224 224
225 if (!e_info->error_dev_num) { 225 if (!e_info->error_dev_num) {
226 dev_printk(KERN_DEBUG, &parent->dev, 226 pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
227 "can't find device of ID%04x\n", 227 e_info->id);
228 e_info->id);
229 return false; 228 return false;
230 } 229 }
231 return true; 230 return true;
@@ -253,7 +252,7 @@ static int report_error_detected(struct pci_dev *dev, void *data)
253 * of a driver for this device is unaware of 252 * of a driver for this device is unaware of
254 * its hw state. 253 * its hw state.
255 */ 254 */
256 dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n", 255 pci_printk(KERN_DEBUG, dev, "device has %s\n",
257 dev->driver ? 256 dev->driver ?
258 "no AER-aware driver" : "no driver"); 257 "no AER-aware driver" : "no driver");
259 } 258 }
@@ -361,7 +360,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
361{ 360{
362 struct aer_broadcast_data result_data; 361 struct aer_broadcast_data result_data;
363 362
364 dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg); 363 pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
365 result_data.state = state; 364 result_data.state = state;
366 if (cb == report_error_detected) 365 if (cb == report_error_detected)
367 result_data.result = PCI_ERS_RESULT_CAN_RECOVER; 366 result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
@@ -410,7 +409,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
410static pci_ers_result_t default_reset_link(struct pci_dev *dev) 409static pci_ers_result_t default_reset_link(struct pci_dev *dev)
411{ 410{
412 pci_reset_bridge_secondary_bus(dev); 411 pci_reset_bridge_secondary_bus(dev);
413 dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n"); 412 pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
414 return PCI_ERS_RESULT_RECOVERED; 413 return PCI_ERS_RESULT_RECOVERED;
415} 414}
416 415
@@ -462,15 +461,13 @@ static pci_ers_result_t reset_link(struct pci_dev *dev)
462 } else if (udev->has_secondary_link) { 461 } else if (udev->has_secondary_link) {
463 status = default_reset_link(udev); 462 status = default_reset_link(udev);
464 } else { 463 } else {
465 dev_printk(KERN_DEBUG, &dev->dev, 464 pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
466 "no link-reset support at upstream device %s\n",
467 pci_name(udev)); 465 pci_name(udev));
468 return PCI_ERS_RESULT_DISCONNECT; 466 return PCI_ERS_RESULT_DISCONNECT;
469 } 467 }
470 468
471 if (status != PCI_ERS_RESULT_RECOVERED) { 469 if (status != PCI_ERS_RESULT_RECOVERED) {
472 dev_printk(KERN_DEBUG, &dev->dev, 470 pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
473 "link reset at upstream device %s failed\n",
474 pci_name(udev)); 471 pci_name(udev));
475 return PCI_ERS_RESULT_DISCONNECT; 472 return PCI_ERS_RESULT_DISCONNECT;
476 } 473 }
@@ -534,12 +531,12 @@ static void do_recovery(struct pci_dev *dev, int severity)
534 "resume", 531 "resume",
535 report_resume); 532 report_resume);
536 533
537 dev_info(&dev->dev, "AER: Device recovery successful\n"); 534 pci_info(dev, "AER: Device recovery successful\n");
538 return; 535 return;
539 536
540failed: 537failed:
541 /* TODO: Should kernel panic here? */ 538 /* TODO: Should kernel panic here? */
542 dev_info(&dev->dev, "AER: Device recovery failed\n"); 539 pci_info(dev, "AER: Device recovery failed\n");
543} 540}
544 541
545/** 542/**
@@ -630,7 +627,8 @@ static void aer_recover_work_func(struct work_struct *work)
630 continue; 627 continue;
631 } 628 }
632 cper_print_aer(pdev, entry.severity, entry.regs); 629 cper_print_aer(pdev, entry.severity, entry.regs);
633 do_recovery(pdev, entry.severity); 630 if (entry.severity != AER_CORRECTABLE)
631 do_recovery(pdev, entry.severity);
634 pci_dev_put(pdev); 632 pci_dev_put(pdev);
635 } 633 }
636} 634}
@@ -657,7 +655,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
657 655
658 /* The device might not support AER */ 656 /* The device might not support AER */
659 if (!pos) 657 if (!pos)
660 return 1; 658 return 0;
661 659
662 if (info->severity == AER_CORRECTABLE) { 660 if (info->severity == AER_CORRECTABLE) {
663 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, 661 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 1f0456f802af..6a352e638699 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -129,7 +129,7 @@ static const char *aer_agent_string[] = {
129static void __print_tlp_header(struct pci_dev *dev, 129static void __print_tlp_header(struct pci_dev *dev,
130 struct aer_header_log_regs *t) 130 struct aer_header_log_regs *t)
131{ 131{
132 dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n", 132 pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
133 t->dw0, t->dw1, t->dw2, t->dw3); 133 t->dw0, t->dw1, t->dw2, t->dw3);
134} 134}
135 135
@@ -152,10 +152,10 @@ static void __aer_print_error(struct pci_dev *dev,
152 aer_uncorrectable_error_string[i] : NULL; 152 aer_uncorrectable_error_string[i] : NULL;
153 153
154 if (errmsg) 154 if (errmsg)
155 dev_err(&dev->dev, " [%2d] %-22s%s\n", i, errmsg, 155 pci_err(dev, " [%2d] %-22s%s\n", i, errmsg,
156 info->first_error == i ? " (First)" : ""); 156 info->first_error == i ? " (First)" : "");
157 else 157 else
158 dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n", 158 pci_err(dev, " [%2d] Unknown Error Bit%s\n",
159 i, info->first_error == i ? " (First)" : ""); 159 i, info->first_error == i ? " (First)" : "");
160 } 160 }
161} 161}
@@ -166,7 +166,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
166 int id = ((dev->bus->number << 8) | dev->devfn); 166 int id = ((dev->bus->number << 8) | dev->devfn);
167 167
168 if (!info->status) { 168 if (!info->status) {
169 dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n", 169 pci_err(dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
170 aer_error_severity_string[info->severity], id); 170 aer_error_severity_string[info->severity], id);
171 goto out; 171 goto out;
172 } 172 }
@@ -174,11 +174,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
174 layer = AER_GET_LAYER_ERROR(info->severity, info->status); 174 layer = AER_GET_LAYER_ERROR(info->severity, info->status);
175 agent = AER_GET_AGENT(info->severity, info->status); 175 agent = AER_GET_AGENT(info->severity, info->status);
176 176
177 dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 177 pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
178 aer_error_severity_string[info->severity], 178 aer_error_severity_string[info->severity],
179 aer_error_layer[layer], id, aer_agent_string[agent]); 179 aer_error_layer[layer], id, aer_agent_string[agent]);
180 180
181 dev_err(&dev->dev, " device [%04x:%04x] error status/mask=%08x/%08x\n", 181 pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
182 dev->vendor, dev->device, 182 dev->vendor, dev->device,
183 info->status, info->mask); 183 info->status, info->mask);
184 184
@@ -189,7 +189,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
189 189
190out: 190out:
191 if (info->id && info->error_dev_num > 1 && info->id == id) 191 if (info->id && info->error_dev_num > 1 && info->id == id)
192 dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id); 192 pci_err(dev, " Error of this Agent(%04x) is reported first\n", id);
193 193
194 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), 194 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
195 info->severity); 195 info->severity);
@@ -197,7 +197,7 @@ out:
197 197
198void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) 198void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
199{ 199{
200 dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n", 200 pci_info(dev, "AER: %s%s error received: id=%04x\n",
201 info->multi_error_valid ? "Multiple " : "", 201 info->multi_error_valid ? "Multiple " : "",
202 aer_error_severity_string[info->severity], info->id); 202 aer_error_severity_string[info->severity], info->id);
203} 203}
@@ -239,13 +239,13 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
239 layer = AER_GET_LAYER_ERROR(aer_severity, status); 239 layer = AER_GET_LAYER_ERROR(aer_severity, status);
240 agent = AER_GET_AGENT(aer_severity, status); 240 agent = AER_GET_AGENT(aer_severity, status);
241 241
242 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); 242 pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
243 cper_print_bits("", status, status_strs, status_strs_size); 243 cper_print_bits("", status, status_strs, status_strs_size);
244 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", 244 pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
245 aer_error_layer[layer], aer_agent_string[agent]); 245 aer_error_layer[layer], aer_agent_string[agent]);
246 246
247 if (aer_severity != AER_CORRECTABLE) 247 if (aer_severity != AER_CORRECTABLE)
248 dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", 248 pci_err(dev, "aer_uncor_severity: 0x%08x\n",
249 aer->uncor_severity); 249 aer->uncor_severity);
250 250
251 if (tlp_header_valid) 251 if (tlp_header_valid)
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 9783e10da3a9..57feef2ecfe7 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -43,18 +43,6 @@
43#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \ 43#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
44 ASPM_STATE_L1SS) 44 ASPM_STATE_L1SS)
45 45
46/*
47 * When L1 substates are enabled, the LTR L1.2 threshold is a timing parameter
48 * that decides whether L1.1 or L1.2 is entered (Refer PCIe spec for details).
49 * Not sure is there is a way to "calculate" this on the fly, but maybe we
50 * could turn it into a parameter in future. This value has been taken from
51 * the following files from Intel's coreboot (which is the only code I found
52 * to have used this):
53 * https://www.coreboot.org/pipermail/coreboot-gerrit/2015-March/021134.html
54 * https://review.coreboot.org/#/c/8832/
55 */
56#define LTR_L1_2_THRESHOLD_BITS ((1 << 21) | (1 << 23) | (1 << 30))
57
58struct aspm_latency { 46struct aspm_latency {
59 u32 l0s; /* L0s latency (nsec) */ 47 u32 l0s; /* L0s latency (nsec) */
60 u32 l1; /* L1 latency (nsec) */ 48 u32 l1; /* L1 latency (nsec) */
@@ -278,7 +266,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
278 return; 266 return;
279 267
280 /* Training failed. Restore common clock configurations */ 268 /* Training failed. Restore common clock configurations */
281 dev_err(&parent->dev, "ASPM: Could not configure common clock\n"); 269 pci_err(parent, "ASPM: Could not configure common clock\n");
282 list_for_each_entry(child, &linkbus->devices, bus_list) 270 list_for_each_entry(child, &linkbus->devices, bus_list)
283 pcie_capability_write_word(child, PCI_EXP_LNKCTL, 271 pcie_capability_write_word(child, PCI_EXP_LNKCTL,
284 child_reg[PCI_FUNC(child->devfn)]); 272 child_reg[PCI_FUNC(child->devfn)]);
@@ -328,11 +316,36 @@ static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
328 case 2: 316 case 2:
329 return val * 100; 317 return val * 100;
330 } 318 }
331 dev_err(&pdev->dev, "%s: Invalid T_PwrOn scale: %u\n", 319 pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
332 __func__, scale);
333 return 0; 320 return 0;
334} 321}
335 322
323static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
324{
325 u64 threshold_ns = threshold_us * 1000;
326
327 /* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
328 if (threshold_ns < 32) {
329 *scale = 0;
330 *value = threshold_ns;
331 } else if (threshold_ns < 1024) {
332 *scale = 1;
333 *value = threshold_ns >> 5;
334 } else if (threshold_ns < 32768) {
335 *scale = 2;
336 *value = threshold_ns >> 10;
337 } else if (threshold_ns < 1048576) {
338 *scale = 3;
339 *value = threshold_ns >> 15;
340 } else if (threshold_ns < 33554432) {
341 *scale = 4;
342 *value = threshold_ns >> 20;
343 } else {
344 *scale = 5;
345 *value = threshold_ns >> 25;
346 }
347}
348
336struct aspm_register_info { 349struct aspm_register_info {
337 u32 support:2; 350 u32 support:2;
338 u32 enabled:2; 351 u32 enabled:2;
@@ -443,6 +456,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
443 struct aspm_register_info *dwreg) 456 struct aspm_register_info *dwreg)
444{ 457{
445 u32 val1, val2, scale1, scale2; 458 u32 val1, val2, scale1, scale2;
459 u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
446 460
447 link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr; 461 link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
448 link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr; 462 link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
@@ -454,16 +468,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
454 /* Choose the greater of the two Port Common_Mode_Restore_Times */ 468 /* Choose the greater of the two Port Common_Mode_Restore_Times */
455 val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; 469 val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
456 val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; 470 val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
457 if (val1 > val2) 471 t_common_mode = max(val1, val2);
458 link->l1ss.ctl1 |= val1 << 8;
459 else
460 link->l1ss.ctl1 |= val2 << 8;
461
462 /*
463 * We currently use LTR L1.2 threshold to be fixed constant picked from
464 * Intel's coreboot.
465 */
466 link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;
467 472
468 /* Choose the greater of the two Port T_POWER_ON times */ 473 /* Choose the greater of the two Port T_POWER_ON times */
469 val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19; 474 val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
@@ -472,10 +477,27 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
472 scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16; 477 scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
473 478
474 if (calc_l1ss_pwron(link->pdev, scale1, val1) > 479 if (calc_l1ss_pwron(link->pdev, scale1, val1) >
475 calc_l1ss_pwron(link->downstream, scale2, val2)) 480 calc_l1ss_pwron(link->downstream, scale2, val2)) {
476 link->l1ss.ctl2 |= scale1 | (val1 << 3); 481 link->l1ss.ctl2 |= scale1 | (val1 << 3);
477 else 482 t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
483 } else {
478 link->l1ss.ctl2 |= scale2 | (val2 << 3); 484 link->l1ss.ctl2 |= scale2 | (val2 << 3);
485 t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
486 }
487
488 /*
489 * Set LTR_L1.2_THRESHOLD to the time required to transition the
490 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
491 * downstream devices report (via LTR) that they can tolerate at
492 * least that much latency.
493 *
494 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
495 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
496 * least 4us.
497 */
498 l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
499 encode_l12_threshold(l1_2_threshold, &scale, &value);
500 link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
479} 501}
480 502
481static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) 503static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
@@ -786,7 +808,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
786 */ 808 */
787 pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32); 809 pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
788 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { 810 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
789 dev_info(&child->dev, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n"); 811 pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
790 return -EINVAL; 812 return -EINVAL;
791 } 813 }
792 } 814 }
@@ -1027,7 +1049,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
1027 * ignored in this situation. 1049 * ignored in this situation.
1028 */ 1050 */
1029 if (aspm_disabled) { 1051 if (aspm_disabled) {
1030 dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n"); 1052 pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1031 return; 1053 return;
1032 } 1054 }
1033 1055
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index d1fb5cf9379e..38e40c6c576f 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -12,34 +12,15 @@
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/pcieport_if.h> 13#include <linux/pcieport_if.h>
14#include "../pci.h" 14#include "../pci.h"
15 15#include "aer/aerdrv.h"
16struct rp_pio_header_log_regs {
17 u32 dw0;
18 u32 dw1;
19 u32 dw2;
20 u32 dw3;
21};
22
23struct dpc_rp_pio_regs {
24 u32 status;
25 u32 mask;
26 u32 severity;
27 u32 syserror;
28 u32 exception;
29
30 struct rp_pio_header_log_regs header_log;
31 u32 impspec_log;
32 u32 tlp_prefix_log[4];
33 u32 log_size;
34 u16 first_error;
35};
36 16
37struct dpc_dev { 17struct dpc_dev {
38 struct pcie_device *dev; 18 struct pcie_device *dev;
39 struct work_struct work; 19 struct work_struct work;
40 int cap_pos; 20 u16 cap_pos;
41 bool rp; 21 bool rp_extensions;
42 u32 rp_pio_status; 22 u32 rp_pio_status;
23 u8 rp_log_size;
43}; 24};
44 25
45static const char * const rp_pio_error_string[] = { 26static const char * const rp_pio_error_string[] = {
@@ -69,13 +50,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
69 unsigned long timeout = jiffies + HZ; 50 unsigned long timeout = jiffies + HZ;
70 struct pci_dev *pdev = dpc->dev->port; 51 struct pci_dev *pdev = dpc->dev->port;
71 struct device *dev = &dpc->dev->device; 52 struct device *dev = &dpc->dev->device;
72 u16 status; 53 u16 cap = dpc->cap_pos, status;
73 54
74 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); 55 pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
75 while (status & PCI_EXP_DPC_RP_BUSY && 56 while (status & PCI_EXP_DPC_RP_BUSY &&
76 !time_after(jiffies, timeout)) { 57 !time_after(jiffies, timeout)) {
77 msleep(10); 58 msleep(10);
78 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); 59 pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
79 } 60 }
80 if (status & PCI_EXP_DPC_RP_BUSY) { 61 if (status & PCI_EXP_DPC_RP_BUSY) {
81 dev_warn(dev, "DPC root port still busy\n"); 62 dev_warn(dev, "DPC root port still busy\n");
@@ -101,11 +82,12 @@ static void dpc_wait_link_inactive(struct dpc_dev *dpc)
101 dev_warn(dev, "Link state not disabled for DPC event\n"); 82 dev_warn(dev, "Link state not disabled for DPC event\n");
102} 83}
103 84
104static void interrupt_event_handler(struct work_struct *work) 85static void dpc_work(struct work_struct *work)
105{ 86{
106 struct dpc_dev *dpc = container_of(work, struct dpc_dev, work); 87 struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
107 struct pci_dev *dev, *temp, *pdev = dpc->dev->port; 88 struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
108 struct pci_bus *parent = pdev->subordinate; 89 struct pci_bus *parent = pdev->subordinate;
90 u16 cap = dpc->cap_pos, ctl;
109 91
110 pci_lock_rescan_remove(); 92 pci_lock_rescan_remove();
111 list_for_each_entry_safe_reverse(dev, temp, &parent->devices, 93 list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
@@ -121,159 +103,127 @@ static void interrupt_event_handler(struct work_struct *work)
121 pci_unlock_rescan_remove(); 103 pci_unlock_rescan_remove();
122 104
123 dpc_wait_link_inactive(dpc); 105 dpc_wait_link_inactive(dpc);
124 if (dpc->rp && dpc_wait_rp_inactive(dpc)) 106 if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
125 return; 107 return;
126 if (dpc->rp && dpc->rp_pio_status) { 108 if (dpc->rp_extensions && dpc->rp_pio_status) {
127 pci_write_config_dword(pdev, 109 pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
128 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_STATUS, 110 dpc->rp_pio_status);
129 dpc->rp_pio_status);
130 dpc->rp_pio_status = 0; 111 dpc->rp_pio_status = 0;
131 } 112 }
132 113
133 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, 114 pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
134 PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT); 115 PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
135}
136 116
137static void dpc_rp_pio_print_tlp_header(struct device *dev, 117 pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
138 struct rp_pio_header_log_regs *t) 118 pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
139{ 119 ctl | PCI_EXP_DPC_CTL_INT_EN);
140 dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n",
141 t->dw0, t->dw1, t->dw2, t->dw3);
142} 120}
143 121
144static void dpc_rp_pio_print_error(struct dpc_dev *dpc, 122static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
145 struct dpc_rp_pio_regs *rp_pio)
146{ 123{
147 struct device *dev = &dpc->dev->device; 124 struct device *dev = &dpc->dev->device;
125 struct pci_dev *pdev = dpc->dev->port;
126 u16 cap = dpc->cap_pos, dpc_status, first_error;
127 u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
148 int i; 128 int i;
149 u32 status;
150 129
130 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
131 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
151 dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", 132 dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
152 rp_pio->status, rp_pio->mask); 133 status, mask);
134
135 dpc->rp_pio_status = status;
153 136
137 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
138 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
139 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
154 dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", 140 dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
155 rp_pio->severity, rp_pio->syserror, rp_pio->exception); 141 sev, syserr, exc);
156 142
157 status = (rp_pio->status & ~rp_pio->mask); 143 /* Get First Error Pointer */
144 pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
145 first_error = (dpc_status & 0x1f00) >> 8;
158 146
147 status &= ~mask;
159 for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) { 148 for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
160 if (!(status & (1 << i))) 149 if (status & (1 << i))
161 continue; 150 dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
162 151 first_error == i ? " (First)" : "");
163 dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
164 rp_pio->first_error == i ? " (First)" : "");
165 } 152 }
166 153
167 dpc_rp_pio_print_tlp_header(dev, &rp_pio->header_log); 154 if (dpc->rp_log_size < 4)
168 if (rp_pio->log_size == 4)
169 return; 155 return;
170 dev_err(dev, "RP PIO ImpSpec Log %#010x\n", rp_pio->impspec_log); 156 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
157 &dw0);
158 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
159 &dw1);
160 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
161 &dw2);
162 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
163 &dw3);
164 dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n",
165 dw0, dw1, dw2, dw3);
171 166
172 for (i = 0; i < rp_pio->log_size - 5; i++) 167 if (dpc->rp_log_size < 5)
173 dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, 168 return;
174 rp_pio->tlp_prefix_log[i]); 169 pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
170 dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log);
171
172 for (i = 0; i < dpc->rp_log_size - 5; i++) {
173 pci_read_config_dword(pdev,
174 cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
175 dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
176 }
175} 177}
176 178
177static void dpc_rp_pio_get_info(struct dpc_dev *dpc, 179static irqreturn_t dpc_irq(int irq, void *context)
178 struct dpc_rp_pio_regs *rp_pio)
179{ 180{
181 struct dpc_dev *dpc = (struct dpc_dev *)context;
180 struct pci_dev *pdev = dpc->dev->port; 182 struct pci_dev *pdev = dpc->dev->port;
181 struct device *dev = &dpc->dev->device; 183 struct device *dev = &dpc->dev->device;
182 int i; 184 u16 cap = dpc->cap_pos, ctl, status, source, reason, ext_reason;
183 u16 cap;
184 u16 status;
185
186 pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_STATUS,
187 &rp_pio->status);
188 pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_MASK,
189 &rp_pio->mask);
190
191 pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_SEVERITY,
192 &rp_pio->severity);
193 pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_SYSERROR,
194 &rp_pio->syserror);
195 pci_read_config_dword(pdev, dpc->cap_pos + PCI_EXP_DPC_RP_PIO_EXCEPTION,
196 &rp_pio->exception);
197 185
198 /* Get First Error Pointer */ 186 pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
199 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
200 rp_pio->first_error = (status & 0x1f00) >> 8;
201 187
202 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); 188 if (!(ctl & PCI_EXP_DPC_CTL_INT_EN) || ctl == (u16)(~0))
203 rp_pio->log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; 189 return IRQ_NONE;
204 if (rp_pio->log_size < 4 || rp_pio->log_size > 9) {
205 dev_err(dev, "RP PIO log size %u is invalid\n",
206 rp_pio->log_size);
207 return;
208 }
209
210 pci_read_config_dword(pdev,
211 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
212 &rp_pio->header_log.dw0);
213 pci_read_config_dword(pdev,
214 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
215 &rp_pio->header_log.dw1);
216 pci_read_config_dword(pdev,
217 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
218 &rp_pio->header_log.dw2);
219 pci_read_config_dword(pdev,
220 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
221 &rp_pio->header_log.dw3);
222 if (rp_pio->log_size == 4)
223 return;
224
225 pci_read_config_dword(pdev,
226 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG,
227 &rp_pio->impspec_log);
228 for (i = 0; i < rp_pio->log_size - 5; i++)
229 pci_read_config_dword(pdev,
230 dpc->cap_pos + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
231 &rp_pio->tlp_prefix_log[i]);
232}
233 190
234static void dpc_process_rp_pio_error(struct dpc_dev *dpc) 191 pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
235{
236 struct dpc_rp_pio_regs rp_pio_regs;
237 192
238 dpc_rp_pio_get_info(dpc, &rp_pio_regs); 193 if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT))
239 dpc_rp_pio_print_error(dpc, &rp_pio_regs); 194 return IRQ_NONE;
240 195
241 dpc->rp_pio_status = rp_pio_regs.status; 196 if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
242} 197 pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
198 PCI_EXP_DPC_STATUS_INTERRUPT);
199 return IRQ_HANDLED;
200 }
243 201
244static irqreturn_t dpc_irq(int irq, void *context) 202 pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
245{ 203 ctl & ~PCI_EXP_DPC_CTL_INT_EN);
246 struct dpc_dev *dpc = (struct dpc_dev *)context;
247 struct pci_dev *pdev = dpc->dev->port;
248 struct device *dev = &dpc->dev->device;
249 u16 status, source;
250 204
251 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); 205 pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
252 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
253 &source); 206 &source);
254 if (!status || status == (u16)(~0))
255 return IRQ_NONE;
256 207
257 dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n", 208 dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n",
258 status, source); 209 status, source);
259 210
260 if (status & PCI_EXP_DPC_STATUS_TRIGGER) { 211 reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
261 u16 reason = (status >> 1) & 0x3; 212 ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
262 u16 ext_reason = (status >> 5) & 0x3; 213
263 214 dev_warn(dev, "DPC %s detected, remove downstream devices\n",
264 dev_warn(dev, "DPC %s detected, remove downstream devices\n", 215 (reason == 0) ? "unmasked uncorrectable error" :
265 (reason == 0) ? "unmasked uncorrectable error" : 216 (reason == 1) ? "ERR_NONFATAL" :
266 (reason == 1) ? "ERR_NONFATAL" : 217 (reason == 2) ? "ERR_FATAL" :
267 (reason == 2) ? "ERR_FATAL" : 218 (ext_reason == 0) ? "RP PIO error" :
268 (ext_reason == 0) ? "RP PIO error" : 219 (ext_reason == 1) ? "software trigger" :
269 (ext_reason == 1) ? "software trigger" : 220 "reserved error");
270 "reserved error"); 221 /* show RP PIO error detail information */
271 /* show RP PIO error detail information */ 222 if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
272 if (reason == 3 && ext_reason == 0) 223 dpc_process_rp_pio_error(dpc);
273 dpc_process_rp_pio_error(dpc); 224
274 225 schedule_work(&dpc->work);
275 schedule_work(&dpc->work); 226
276 }
277 return IRQ_HANDLED; 227 return IRQ_HANDLED;
278} 228}
279 229
@@ -286,13 +236,16 @@ static int dpc_probe(struct pcie_device *dev)
286 int status; 236 int status;
287 u16 ctl, cap; 237 u16 ctl, cap;
288 238
239 if (pcie_aer_get_firmware_first(pdev))
240 return -ENOTSUPP;
241
289 dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL); 242 dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
290 if (!dpc) 243 if (!dpc)
291 return -ENOMEM; 244 return -ENOMEM;
292 245
293 dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC); 246 dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
294 dpc->dev = dev; 247 dpc->dev = dev;
295 INIT_WORK(&dpc->work, interrupt_event_handler); 248 INIT_WORK(&dpc->work, dpc_work);
296 set_service_data(dev, dpc); 249 set_service_data(dev, dpc);
297 250
298 status = devm_request_irq(device, dev->irq, dpc_irq, IRQF_SHARED, 251 status = devm_request_irq(device, dev->irq, dpc_irq, IRQF_SHARED,
@@ -306,15 +259,23 @@ static int dpc_probe(struct pcie_device *dev)
306 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); 259 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
307 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); 260 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
308 261
309 dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT); 262 dpc->rp_extensions = (cap & PCI_EXP_DPC_CAP_RP_EXT);
263 if (dpc->rp_extensions) {
264 dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
265 if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) {
266 dev_err(device, "RP PIO log size %u is invalid\n",
267 dpc->rp_log_size);
268 dpc->rp_log_size = 0;
269 }
270 }
310 271
311 ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; 272 ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
312 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); 273 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
313 274
314 dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", 275 dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
315 cap & 0xf, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), 276 cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
316 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), 277 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
317 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf, 278 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
318 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); 279 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
319 return status; 280 return status;
320} 281}
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 55bdef4fb558..5480f54f7612 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -195,14 +195,14 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
195 * assuming that the PME was reported by a PCIe-PCI bridge that 195 * assuming that the PME was reported by a PCIe-PCI bridge that
196 * used devfn different from zero. 196 * used devfn different from zero.
197 */ 197 */
198 dev_dbg(&port->dev, "PME interrupt generated for non-existent device %02x:%02x.%d\n", 198 pci_dbg(port, "PME interrupt generated for non-existent device %02x:%02x.%d\n",
199 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); 199 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
200 found = pcie_pme_from_pci_bridge(bus, 0); 200 found = pcie_pme_from_pci_bridge(bus, 0);
201 } 201 }
202 202
203 out: 203 out:
204 if (!found) 204 if (!found)
205 dev_dbg(&port->dev, "Spurious native PME interrupt!\n"); 205 pci_dbg(port, "Spurious native PME interrupt!\n");
206} 206}
207 207
208/** 208/**
@@ -342,7 +342,7 @@ static int pcie_pme_probe(struct pcie_device *srv)
342 return ret; 342 return ret;
343 } 343 }
344 344
345 dev_info(&port->dev, "Signaling PME with IRQ %d\n", srv->irq); 345 pci_info(port, "Signaling PME with IRQ %d\n", srv->irq);
346 346
347 pcie_pme_mark_devices(port); 347 pcie_pme_mark_devices(port);
348 pcie_pme_interrupt_enable(port, true); 348 pcie_pme_interrupt_enable(port, true);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index a59210350c44..ef3bad4ad010 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -216,9 +216,9 @@ static int get_port_device_capability(struct pci_dev *dev)
216 return 0; 216 return 0;
217 217
218 cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP 218 cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
219 | PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC; 219 | PCIE_PORT_SERVICE_VC;
220 if (pci_aer_available()) 220 if (pci_aer_available())
221 cap_mask |= PCIE_PORT_SERVICE_AER; 221 cap_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
222 222
223 if (pcie_ports_auto) 223 if (pcie_ports_auto)
224 pcie_port_platform_notify(dev, &cap_mask); 224 pcie_port_platform_notify(dev, &cap_mask);
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 1071b8c6208c..98cfa30f3fae 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -25,7 +25,7 @@ static void pci_ptm_info(struct pci_dev *dev)
25 dev->ptm_granularity); 25 dev->ptm_granularity);
26 break; 26 break;
27 } 27 }
28 dev_info(&dev->dev, "PTM enabled%s, %s granularity\n", 28 pci_info(dev, "PTM enabled%s, %s granularity\n",
29 dev->ptm_root ? " (root)" : "", clock_desc); 29 dev->ptm_root ? " (root)" : "", clock_desc);
30} 30}
31 31
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3ea2d610d607..ef5377438a1e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -70,8 +70,8 @@ static int find_anything(struct device *dev, void *data)
70} 70}
71 71
72/* 72/*
73 * Some device drivers need know if pci is initiated. 73 * Some device drivers need know if PCI is initiated.
74 * Basically, we think pci is not initiated when there 74 * Basically, we think PCI is not initiated when there
75 * is no device to be found on the pci_bus_type. 75 * is no device to be found on the pci_bus_type.
76 */ 76 */
77int no_pci_devices(void) 77int no_pci_devices(void)
@@ -117,12 +117,16 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
117 if (!size) 117 if (!size)
118 return 0; 118 return 0;
119 119
120 /* Get the lowest of them to find the decode size, and 120 /*
121 from that the extent. */ 121 * Get the lowest of them to find the decode size, and from that
122 * the extent.
123 */
122 size = (size & ~(size-1)) - 1; 124 size = (size & ~(size-1)) - 1;
123 125
124 /* base == maxbase can be valid only if the BAR has 126 /*
125 already been programmed with all 1s. */ 127 * base == maxbase can be valid only if the BAR has already been
128 * programmed with all 1s.
129 */
126 if (base == maxbase && ((base | size) & mask) != mask) 130 if (base == maxbase && ((base | size) & mask) != mask)
127 return 0; 131 return 0;
128 132
@@ -165,7 +169,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
165#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO) 169#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
166 170
167/** 171/**
168 * pci_read_base - read a PCI BAR 172 * pci_read_base - Read a PCI BAR
169 * @dev: the PCI device 173 * @dev: the PCI device
170 * @type: type of the BAR 174 * @type: type of the BAR
171 * @res: resource buffer to be filled in 175 * @res: resource buffer to be filled in
@@ -254,7 +258,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
254 258
255 sz64 = pci_size(l64, sz64, mask64); 259 sz64 = pci_size(l64, sz64, mask64);
256 if (!sz64) { 260 if (!sz64) {
257 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n", 261 pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
258 pos); 262 pos);
259 goto fail; 263 goto fail;
260 } 264 }
@@ -265,7 +269,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
265 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; 269 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
266 res->start = 0; 270 res->start = 0;
267 res->end = 0; 271 res->end = 0;
268 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n", 272 pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
269 pos, (unsigned long long)sz64); 273 pos, (unsigned long long)sz64);
270 goto out; 274 goto out;
271 } 275 }
@@ -275,7 +279,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
275 res->flags |= IORESOURCE_UNSET; 279 res->flags |= IORESOURCE_UNSET;
276 res->start = 0; 280 res->start = 0;
277 res->end = sz64; 281 res->end = sz64;
278 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", 282 pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
279 pos, (unsigned long long)l64); 283 pos, (unsigned long long)l64);
280 goto out; 284 goto out;
281 } 285 }
@@ -302,7 +306,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
302 res->flags |= IORESOURCE_UNSET; 306 res->flags |= IORESOURCE_UNSET;
303 res->start = 0; 307 res->start = 0;
304 res->end = region.end - region.start; 308 res->end = region.end - region.start;
305 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n", 309 pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
306 pos, (unsigned long long)region.start); 310 pos, (unsigned long long)region.start);
307 } 311 }
308 312
@@ -313,7 +317,7 @@ fail:
313 res->flags = 0; 317 res->flags = 0;
314out: 318out:
315 if (res->flags) 319 if (res->flags)
316 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res); 320 pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res);
317 321
318 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; 322 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
319} 323}
@@ -376,7 +380,7 @@ static void pci_read_bridge_io(struct pci_bus *child)
376 region.start = base; 380 region.start = base;
377 region.end = limit + io_granularity - 1; 381 region.end = limit + io_granularity - 1;
378 pcibios_bus_to_resource(dev->bus, res, &region); 382 pcibios_bus_to_resource(dev->bus, res, &region);
379 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 383 pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
380 } 384 }
381} 385}
382 386
@@ -398,7 +402,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child)
398 region.start = base; 402 region.start = base;
399 region.end = limit + 0xfffff; 403 region.end = limit + 0xfffff;
400 pcibios_bus_to_resource(dev->bus, res, &region); 404 pcibios_bus_to_resource(dev->bus, res, &region);
401 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 405 pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
402 } 406 }
403} 407}
404 408
@@ -438,7 +442,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
438 limit = (pci_bus_addr_t) limit64; 442 limit = (pci_bus_addr_t) limit64;
439 443
440 if (base != base64) { 444 if (base != base64) {
441 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", 445 pci_err(dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
442 (unsigned long long) base64); 446 (unsigned long long) base64);
443 return; 447 return;
444 } 448 }
@@ -451,7 +455,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
451 region.start = base; 455 region.start = base;
452 region.end = limit + 0xfffff; 456 region.end = limit + 0xfffff;
453 pcibios_bus_to_resource(dev->bus, res, &region); 457 pcibios_bus_to_resource(dev->bus, res, &region);
454 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 458 pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res);
455 } 459 }
456} 460}
457 461
@@ -464,7 +468,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
464 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ 468 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
465 return; 469 return;
466 470
467 dev_info(&dev->dev, "PCI bridge to %pR%s\n", 471 pci_info(dev, "PCI bridge to %pR%s\n",
468 &child->busn_res, 472 &child->busn_res,
469 dev->transparent ? " (subtractive decode)" : ""); 473 dev->transparent ? " (subtractive decode)" : "");
470 474
@@ -481,7 +485,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
481 if (res && res->flags) { 485 if (res && res->flags) {
482 pci_bus_add_resource(child, res, 486 pci_bus_add_resource(child, res,
483 PCI_SUBTRACTIVE_DECODE); 487 PCI_SUBTRACTIVE_DECODE);
484 dev_printk(KERN_DEBUG, &dev->dev, 488 pci_printk(KERN_DEBUG, dev,
485 " bridge window %pR (subtractive decode)\n", 489 " bridge window %pR (subtractive decode)\n",
486 res); 490 res);
487 } 491 }
@@ -765,7 +769,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
765 769
766 bridge->bus = bus; 770 bridge->bus = bus;
767 771
768 /* temporarily move resources off the list */ 772 /* Temporarily move resources off the list */
769 list_splice_init(&bridge->windows, &resources); 773 list_splice_init(&bridge->windows, &resources);
770 bus->sysdata = bridge->sysdata; 774 bus->sysdata = bridge->sysdata;
771 bus->msi = bridge->msi; 775 bus->msi = bridge->msi;
@@ -777,7 +781,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
777 781
778 b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); 782 b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
779 if (b) { 783 if (b) {
780 /* If we already got to this bus through a different bridge, ignore it */ 784 /* Ignore it if we already got here via a different bridge */
781 dev_dbg(&b->dev, "bus already known\n"); 785 dev_dbg(&b->dev, "bus already known\n");
782 err = -EEXIST; 786 err = -EEXIST;
783 goto free; 787 goto free;
@@ -870,9 +874,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
870 int i; 874 int i;
871 int ret; 875 int ret;
872 876
873 /* 877 /* Allocate a new bus and inherit stuff from the parent */
874 * Allocate a new bus, and inherit stuff from the parent..
875 */
876 child = pci_alloc_bus(parent); 878 child = pci_alloc_bus(parent);
877 if (!child) 879 if (!child)
878 return NULL; 880 return NULL;
@@ -883,16 +885,14 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
883 child->sysdata = parent->sysdata; 885 child->sysdata = parent->sysdata;
884 child->bus_flags = parent->bus_flags; 886 child->bus_flags = parent->bus_flags;
885 887
886 /* initialize some portions of the bus device, but don't register it 888 /*
887 * now as the parent is not properly set up yet. 889 * Initialize some portions of the bus device, but don't register
890 * it now as the parent is not properly set up yet.
888 */ 891 */
889 child->dev.class = &pcibus_class; 892 child->dev.class = &pcibus_class;
890 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); 893 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
891 894
892 /* 895 /* Set up the primary, secondary and subordinate bus numbers */
893 * Set up the primary, secondary and subordinate
894 * bus numbers.
895 */
896 child->number = child->busn_res.start = busnr; 896 child->number = child->busn_res.start = busnr;
897 child->primary = parent->busn_res.start; 897 child->primary = parent->busn_res.start;
898 child->busn_res.end = 0xff; 898 child->busn_res.end = 0xff;
@@ -908,7 +908,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
908 pci_set_bus_of_node(child); 908 pci_set_bus_of_node(child);
909 pci_set_bus_speed(child); 909 pci_set_bus_speed(child);
910 910
911 /* Set up default resource pointers and names.. */ 911 /* Set up default resource pointers and names */
912 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { 912 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
913 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; 913 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
914 child->resource[i]->name = child->name; 914 child->resource[i]->name = child->name;
@@ -1006,11 +1006,11 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1006 secondary = (buses >> 8) & 0xFF; 1006 secondary = (buses >> 8) & 0xFF;
1007 subordinate = (buses >> 16) & 0xFF; 1007 subordinate = (buses >> 16) & 0xFF;
1008 1008
1009 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", 1009 pci_dbg(dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1010 secondary, subordinate, pass); 1010 secondary, subordinate, pass);
1011 1011
1012 if (!primary && (primary != bus->number) && secondary && subordinate) { 1012 if (!primary && (primary != bus->number) && secondary && subordinate) {
1013 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n"); 1013 pci_warn(dev, "Primary bus is hard wired to 0\n");
1014 primary = bus->number; 1014 primary = bus->number;
1015 } 1015 }
1016 1016
@@ -1018,13 +1018,15 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1018 if (!pass && 1018 if (!pass &&
1019 (primary != bus->number || secondary <= bus->number || 1019 (primary != bus->number || secondary <= bus->number ||
1020 secondary > subordinate)) { 1020 secondary > subordinate)) {
1021 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", 1021 pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1022 secondary, subordinate); 1022 secondary, subordinate);
1023 broken = 1; 1023 broken = 1;
1024 } 1024 }
1025 1025
1026 /* Disable MasterAbortMode during probing to avoid reporting 1026 /*
1027 of bus errors (in some architectures) */ 1027 * Disable Master-Abort Mode during probing to avoid reporting of
1028 * bus errors in some architectures.
1029 */
1028 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 1030 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1029 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 1031 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1030 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 1032 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
@@ -1034,18 +1036,19 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1034 if ((secondary || subordinate) && !pcibios_assign_all_busses() && 1036 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1035 !is_cardbus && !broken) { 1037 !is_cardbus && !broken) {
1036 unsigned int cmax; 1038 unsigned int cmax;
1039
1037 /* 1040 /*
1038 * Bus already configured by firmware, process it in the first 1041 * Bus already configured by firmware, process it in the
1039 * pass and just note the configuration. 1042 * first pass and just note the configuration.
1040 */ 1043 */
1041 if (pass) 1044 if (pass)
1042 goto out; 1045 goto out;
1043 1046
1044 /* 1047 /*
1045 * The bus might already exist for two reasons: Either we are 1048 * The bus might already exist for two reasons: Either we
1046 * rescanning the bus or the bus is reachable through more than 1049 * are rescanning the bus or the bus is reachable through
1047 * one bridge. The second case can happen with the i450NX 1050 * more than one bridge. The second case can happen with
1048 * chipset. 1051 * the i450NX chipset.
1049 */ 1052 */
1050 child = pci_find_bus(pci_domain_nr(bus), secondary); 1053 child = pci_find_bus(pci_domain_nr(bus), secondary);
1051 if (!child) { 1054 if (!child) {
@@ -1059,24 +1062,29 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1059 1062
1060 cmax = pci_scan_child_bus(child); 1063 cmax = pci_scan_child_bus(child);
1061 if (cmax > subordinate) 1064 if (cmax > subordinate)
1062 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", 1065 pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
1063 subordinate, cmax); 1066 subordinate, cmax);
1064 /* subordinate should equal child->busn_res.end */ 1067
1068 /* Subordinate should equal child->busn_res.end */
1065 if (subordinate > max) 1069 if (subordinate > max)
1066 max = subordinate; 1070 max = subordinate;
1067 } else { 1071 } else {
1072
1068 /* 1073 /*
1069 * We need to assign a number to this bus which we always 1074 * We need to assign a number to this bus which we always
1070 * do in the second pass. 1075 * do in the second pass.
1071 */ 1076 */
1072 if (!pass) { 1077 if (!pass) {
1073 if (pcibios_assign_all_busses() || broken || is_cardbus) 1078 if (pcibios_assign_all_busses() || broken || is_cardbus)
1074 /* Temporarily disable forwarding of the 1079
1075 configuration cycles on all bridges in 1080 /*
1076 this bus segment to avoid possible 1081 * Temporarily disable forwarding of the
1077 conflicts in the second pass between two 1082 * configuration cycles on all bridges in
1078 bridges programmed with overlapping 1083 * this bus segment to avoid possible
1079 bus ranges. */ 1084 * conflicts in the second pass between two
1085 * bridges programmed with overlapping bus
1086 * ranges.
1087 */
1080 pci_write_config_dword(dev, PCI_PRIMARY_BUS, 1088 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1081 buses & ~0xffffff); 1089 buses & ~0xffffff);
1082 goto out; 1090 goto out;
@@ -1085,9 +1093,11 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1085 /* Clear errors */ 1093 /* Clear errors */
1086 pci_write_config_word(dev, PCI_STATUS, 0xffff); 1094 pci_write_config_word(dev, PCI_STATUS, 0xffff);
1087 1095
1088 /* Prevent assigning a bus number that already exists. 1096 /*
1089 * This can happen when a bridge is hot-plugged, so in 1097 * Prevent assigning a bus number that already exists.
1090 * this case we only re-scan this bus. */ 1098 * This can happen when a bridge is hot-plugged, so in this
1099 * case we only re-scan this bus.
1100 */
1091 child = pci_find_bus(pci_domain_nr(bus), max+1); 1101 child = pci_find_bus(pci_domain_nr(bus), max+1);
1092 if (!child) { 1102 if (!child) {
1093 child = pci_add_new_bus(bus, dev, max+1); 1103 child = pci_add_new_bus(bus, dev, max+1);
@@ -1114,19 +1124,18 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1114 buses |= CARDBUS_LATENCY_TIMER << 24; 1124 buses |= CARDBUS_LATENCY_TIMER << 24;
1115 } 1125 }
1116 1126
1117 /* 1127 /* We need to blast all three values with a single write */
1118 * We need to blast all three values with a single write.
1119 */
1120 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses); 1128 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1121 1129
1122 if (!is_cardbus) { 1130 if (!is_cardbus) {
1123 child->bridge_ctl = bctl; 1131 child->bridge_ctl = bctl;
1124 max = pci_scan_child_bus_extend(child, available_buses); 1132 max = pci_scan_child_bus_extend(child, available_buses);
1125 } else { 1133 } else {
1134
1126 /* 1135 /*
1127 * For CardBus bridges, we leave 4 bus numbers 1136 * For CardBus bridges, we leave 4 bus numbers as
1128 * as cards with a PCI-to-PCI bridge can be 1137 * cards with a PCI-to-PCI bridge can be inserted
1129 * inserted later. 1138 * later.
1130 */ 1139 */
1131 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) { 1140 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1132 struct pci_bus *parent = bus; 1141 struct pci_bus *parent = bus;
@@ -1142,10 +1151,11 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1142 parent = parent->parent; 1151 parent = parent->parent;
1143 } 1152 }
1144 if (j) { 1153 if (j) {
1154
1145 /* 1155 /*
1146 * Often, there are two cardbus bridges 1156 * Often, there are two CardBus
1147 * -- try to leave one valid bus number 1157 * bridges -- try to leave one
1148 * for each one. 1158 * valid bus number for each one.
1149 */ 1159 */
1150 i /= 2; 1160 i /= 2;
1151 break; 1161 break;
@@ -1153,9 +1163,8 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1153 } 1163 }
1154 max += i; 1164 max += i;
1155 } 1165 }
1156 /* 1166
1157 * Set the subordinate bus number to its real value. 1167 /* Set subordinate bus number to its real value */
1158 */
1159 pci_bus_update_busn_res_end(child, max); 1168 pci_bus_update_busn_res_end(child, max);
1160 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); 1169 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1161 } 1170 }
@@ -1296,7 +1305,7 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
1296} 1305}
1297 1306
1298/** 1307/**
1299 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config? 1308 * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
1300 * @dev: PCI device 1309 * @dev: PCI device
1301 * 1310 *
1302 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that 1311 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
@@ -1333,7 +1342,7 @@ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1333} 1342}
1334 1343
1335/** 1344/**
1336 * pci_cfg_space_size - get the configuration space size of the PCI device. 1345 * pci_cfg_space_size - Get the configuration space size of the PCI device
1337 * @dev: PCI device 1346 * @dev: PCI device
1338 * 1347 *
1339 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices 1348 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
@@ -1399,7 +1408,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1399} 1408}
1400 1409
1401/** 1410/**
1402 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability 1411 * pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability
1403 * @dev: PCI device 1412 * @dev: PCI device
1404 * 1413 *
1405 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this 1414 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
@@ -1427,11 +1436,11 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
1427} 1436}
1428 1437
1429/** 1438/**
1430 * pci_setup_device - fill in class and map information of a device 1439 * pci_setup_device - Fill in class and map information of a device
1431 * @dev: the device structure to fill 1440 * @dev: the device structure to fill
1432 * 1441 *
1433 * Initialize the device structure with information about the device's 1442 * Initialize the device structure with information about the device's
1434 * vendor,class,memory and IO-space addresses,IRQ lines etc. 1443 * vendor,class,memory and IO-space addresses, IRQ lines etc.
1435 * Called at initialisation of the PCI subsystem and by CardBus services. 1444 * Called at initialisation of the PCI subsystem and by CardBus services.
1436 * Returns 0 on success and negative if unknown type of device (not normal, 1445 * Returns 0 on success and negative if unknown type of device (not normal,
1437 * bridge or CardBus). 1446 * bridge or CardBus).
@@ -1457,8 +1466,11 @@ int pci_setup_device(struct pci_dev *dev)
1457 set_pcie_port_type(dev); 1466 set_pcie_port_type(dev);
1458 1467
1459 pci_dev_assign_slot(dev); 1468 pci_dev_assign_slot(dev);
1460 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 1469
1461 set this higher, assuming the system even supports it. */ 1470 /*
1471 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1472 * set this higher, assuming the system even supports it.
1473 */
1462 dev->dma_mask = 0xffffffff; 1474 dev->dma_mask = 0xffffffff;
1463 1475
1464 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 1476 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
@@ -1469,13 +1481,13 @@ int pci_setup_device(struct pci_dev *dev)
1469 dev->revision = class & 0xff; 1481 dev->revision = class & 0xff;
1470 dev->class = class >> 8; /* upper 3 bytes */ 1482 dev->class = class >> 8; /* upper 3 bytes */
1471 1483
1472 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n", 1484 pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
1473 dev->vendor, dev->device, dev->hdr_type, dev->class); 1485 dev->vendor, dev->device, dev->hdr_type, dev->class);
1474 1486
1475 /* need to have dev->class ready */ 1487 /* Need to have dev->class ready */
1476 dev->cfg_size = pci_cfg_space_size(dev); 1488 dev->cfg_size = pci_cfg_space_size(dev);
1477 1489
1478 /* need to have dev->cfg_size ready */ 1490 /* Need to have dev->cfg_size ready */
1479 set_pcie_thunderbolt(dev); 1491 set_pcie_thunderbolt(dev);
1480 1492
1481 /* "Unknown power state" */ 1493 /* "Unknown power state" */
@@ -1483,13 +1495,14 @@ int pci_setup_device(struct pci_dev *dev)
1483 1495
1484 /* Early fixups, before probing the BARs */ 1496 /* Early fixups, before probing the BARs */
1485 pci_fixup_device(pci_fixup_early, dev); 1497 pci_fixup_device(pci_fixup_early, dev);
1486 /* device class may be changed after fixup */ 1498
1499 /* Device class may be changed after fixup */
1487 class = dev->class >> 8; 1500 class = dev->class >> 8;
1488 1501
1489 if (dev->non_compliant_bars) { 1502 if (dev->non_compliant_bars) {
1490 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1503 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1491 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { 1504 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1492 dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); 1505 pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1493 cmd &= ~PCI_COMMAND_IO; 1506 cmd &= ~PCI_COMMAND_IO;
1494 cmd &= ~PCI_COMMAND_MEMORY; 1507 cmd &= ~PCI_COMMAND_MEMORY;
1495 pci_write_config_word(dev, PCI_COMMAND, cmd); 1508 pci_write_config_word(dev, PCI_COMMAND, cmd);
@@ -1522,14 +1535,14 @@ int pci_setup_device(struct pci_dev *dev)
1522 res = &dev->resource[0]; 1535 res = &dev->resource[0];
1523 res->flags = LEGACY_IO_RESOURCE; 1536 res->flags = LEGACY_IO_RESOURCE;
1524 pcibios_bus_to_resource(dev->bus, res, &region); 1537 pcibios_bus_to_resource(dev->bus, res, &region);
1525 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n", 1538 pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
1526 res); 1539 res);
1527 region.start = 0x3F6; 1540 region.start = 0x3F6;
1528 region.end = 0x3F6; 1541 region.end = 0x3F6;
1529 res = &dev->resource[1]; 1542 res = &dev->resource[1];
1530 res->flags = LEGACY_IO_RESOURCE; 1543 res->flags = LEGACY_IO_RESOURCE;
1531 pcibios_bus_to_resource(dev->bus, res, &region); 1544 pcibios_bus_to_resource(dev->bus, res, &region);
1532 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n", 1545 pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
1533 res); 1546 res);
1534 } 1547 }
1535 if ((progif & 4) == 0) { 1548 if ((progif & 4) == 0) {
@@ -1538,14 +1551,14 @@ int pci_setup_device(struct pci_dev *dev)
1538 res = &dev->resource[2]; 1551 res = &dev->resource[2];
1539 res->flags = LEGACY_IO_RESOURCE; 1552 res->flags = LEGACY_IO_RESOURCE;
1540 pcibios_bus_to_resource(dev->bus, res, &region); 1553 pcibios_bus_to_resource(dev->bus, res, &region);
1541 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n", 1554 pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
1542 res); 1555 res);
1543 region.start = 0x376; 1556 region.start = 0x376;
1544 region.end = 0x376; 1557 region.end = 0x376;
1545 res = &dev->resource[3]; 1558 res = &dev->resource[3];
1546 res->flags = LEGACY_IO_RESOURCE; 1559 res->flags = LEGACY_IO_RESOURCE;
1547 pcibios_bus_to_resource(dev->bus, res, &region); 1560 pcibios_bus_to_resource(dev->bus, res, &region);
1548 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n", 1561 pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1549 res); 1562 res);
1550 } 1563 }
1551 } 1564 }
@@ -1554,9 +1567,12 @@ int pci_setup_device(struct pci_dev *dev)
1554 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ 1567 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1555 if (class != PCI_CLASS_BRIDGE_PCI) 1568 if (class != PCI_CLASS_BRIDGE_PCI)
1556 goto bad; 1569 goto bad;
1557 /* The PCI-to-PCI bridge spec requires that subtractive 1570
1558 decoding (i.e. transparent) bridge must have programming 1571 /*
1559 interface code of 0x01. */ 1572 * The PCI-to-PCI bridge spec requires that subtractive
1573 * decoding (i.e. transparent) bridge must have programming
1574 * interface code of 0x01.
1575 */
1560 pci_read_irq(dev); 1576 pci_read_irq(dev);
1561 dev->transparent = ((dev->class & 0xff) == 1); 1577 dev->transparent = ((dev->class & 0xff) == 1);
1562 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); 1578 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
@@ -1578,12 +1594,12 @@ int pci_setup_device(struct pci_dev *dev)
1578 break; 1594 break;
1579 1595
1580 default: /* unknown header */ 1596 default: /* unknown header */
1581 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n", 1597 pci_err(dev, "unknown header type %02x, ignoring device\n",
1582 dev->hdr_type); 1598 dev->hdr_type);
1583 return -EIO; 1599 return -EIO;
1584 1600
1585 bad: 1601 bad:
1586 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n", 1602 pci_err(dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1587 dev->class, dev->hdr_type); 1603 dev->class, dev->hdr_type);
1588 dev->class = PCI_CLASS_NOT_DEFINED << 8; 1604 dev->class = PCI_CLASS_NOT_DEFINED << 8;
1589 } 1605 }
@@ -1607,7 +1623,7 @@ static void pci_configure_mps(struct pci_dev *dev)
1607 return; 1623 return;
1608 1624
1609 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) { 1625 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1610 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", 1626 pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1611 mps, pci_name(bridge), p_mps); 1627 mps, pci_name(bridge), p_mps);
1612 return; 1628 return;
1613 } 1629 }
@@ -1621,12 +1637,12 @@ static void pci_configure_mps(struct pci_dev *dev)
1621 1637
1622 rc = pcie_set_mps(dev, p_mps); 1638 rc = pcie_set_mps(dev, p_mps);
1623 if (rc) { 1639 if (rc) {
1624 dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", 1640 pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1625 p_mps); 1641 p_mps);
1626 return; 1642 return;
1627 } 1643 }
1628 1644
1629 dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n", 1645 pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
1630 p_mps, mps, 128 << dev->pcie_mpss); 1646 p_mps, mps, 128 << dev->pcie_mpss);
1631} 1647}
1632 1648
@@ -1646,8 +1662,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1646 hpp = &pci_default_type0; 1662 hpp = &pci_default_type0;
1647 1663
1648 if (hpp->revision > 1) { 1664 if (hpp->revision > 1) {
1649 dev_warn(&dev->dev, 1665 pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
1650 "PCI settings rev %d not supported; using defaults\n",
1651 hpp->revision); 1666 hpp->revision);
1652 hpp = &pci_default_type0; 1667 hpp = &pci_default_type0;
1653 } 1668 }
@@ -1685,7 +1700,7 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1685 if (!pos) 1700 if (!pos)
1686 return; 1701 return;
1687 1702
1688 dev_warn(&dev->dev, "PCI-X settings not supported\n"); 1703 pci_warn(dev, "PCI-X settings not supported\n");
1689} 1704}
1690 1705
1691static bool pcie_root_rcb_set(struct pci_dev *dev) 1706static bool pcie_root_rcb_set(struct pci_dev *dev)
@@ -1715,7 +1730,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1715 return; 1730 return;
1716 1731
1717 if (hpp->revision > 1) { 1732 if (hpp->revision > 1) {
1718 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", 1733 pci_warn(dev, "PCIe settings rev %d not supported\n",
1719 hpp->revision); 1734 hpp->revision);
1720 return; 1735 return;
1721 } 1736 }
@@ -1773,6 +1788,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1773 /* Initialize Advanced Error Capabilities and Control Register */ 1788 /* Initialize Advanced Error Capabilities and Control Register */
1774 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32); 1789 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1775 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; 1790 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1791
1776 /* Don't enable ECRC generation or checking if unsupported */ 1792 /* Don't enable ECRC generation or checking if unsupported */
1777 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) 1793 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
1778 reg32 &= ~PCI_ERR_CAP_ECRC_GENE; 1794 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
@@ -1819,7 +1835,7 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
1819 */ 1835 */
1820 if (host->no_ext_tags) { 1836 if (host->no_ext_tags) {
1821 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) { 1837 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1822 dev_info(&dev->dev, "disabling Extended Tags\n"); 1838 pci_info(dev, "disabling Extended Tags\n");
1823 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, 1839 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1824 PCI_EXP_DEVCTL_EXT_TAG); 1840 PCI_EXP_DEVCTL_EXT_TAG);
1825 } 1841 }
@@ -1827,7 +1843,7 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
1827 } 1843 }
1828 1844
1829 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) { 1845 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1830 dev_info(&dev->dev, "enabling Extended Tags\n"); 1846 pci_info(dev, "enabling Extended Tags\n");
1831 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, 1847 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1832 PCI_EXP_DEVCTL_EXT_TAG); 1848 PCI_EXP_DEVCTL_EXT_TAG);
1833 } 1849 }
@@ -1872,10 +1888,42 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1872 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) { 1888 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1873 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, 1889 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1874 PCI_EXP_DEVCTL_RELAX_EN); 1890 PCI_EXP_DEVCTL_RELAX_EN);
1875 dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n"); 1891 pci_info(dev, "Relaxed Ordering disabled because the Root Port didn't support it\n");
1876 } 1892 }
1877} 1893}
1878 1894
1895static void pci_configure_ltr(struct pci_dev *dev)
1896{
1897#ifdef CONFIG_PCIEASPM
1898 u32 cap;
1899 struct pci_dev *bridge;
1900
1901 if (!pci_is_pcie(dev))
1902 return;
1903
1904 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
1905 if (!(cap & PCI_EXP_DEVCAP2_LTR))
1906 return;
1907
1908 /*
1909 * Software must not enable LTR in an Endpoint unless the Root
1910 * Complex and all intermediate Switches indicate support for LTR.
1911 * PCIe r3.1, sec 6.18.
1912 */
1913 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
1914 dev->ltr_path = 1;
1915 else {
1916 bridge = pci_upstream_bridge(dev);
1917 if (bridge && bridge->ltr_path)
1918 dev->ltr_path = 1;
1919 }
1920
1921 if (dev->ltr_path)
1922 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
1923 PCI_EXP_DEVCTL2_LTR_EN);
1924#endif
1925}
1926
1879static void pci_configure_device(struct pci_dev *dev) 1927static void pci_configure_device(struct pci_dev *dev)
1880{ 1928{
1881 struct hotplug_params hpp; 1929 struct hotplug_params hpp;
@@ -1884,6 +1932,7 @@ static void pci_configure_device(struct pci_dev *dev)
1884 pci_configure_mps(dev); 1932 pci_configure_mps(dev);
1885 pci_configure_extended_tags(dev, NULL); 1933 pci_configure_extended_tags(dev, NULL);
1886 pci_configure_relaxed_ordering(dev); 1934 pci_configure_relaxed_ordering(dev);
1935 pci_configure_ltr(dev);
1887 1936
1888 memset(&hpp, 0, sizeof(hpp)); 1937 memset(&hpp, 0, sizeof(hpp));
1889 ret = pci_get_hp_params(dev, &hpp); 1938 ret = pci_get_hp_params(dev, &hpp);
@@ -1903,10 +1952,11 @@ static void pci_release_capabilities(struct pci_dev *dev)
1903} 1952}
1904 1953
1905/** 1954/**
1906 * pci_release_dev - free a pci device structure when all users of it are finished. 1955 * pci_release_dev - Free a PCI device structure when all users of it are
1956 * finished
1907 * @dev: device that's been disconnected 1957 * @dev: device that's been disconnected
1908 * 1958 *
1909 * Will be called only by the device core when all users of this pci device are 1959 * Will be called only by the device core when all users of this PCI device are
1910 * done. 1960 * done.
1911 */ 1961 */
1912static void pci_release_dev(struct device *dev) 1962static void pci_release_dev(struct device *dev)
@@ -1994,7 +2044,7 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1994 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) 2044 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1995 return false; 2045 return false;
1996 2046
1997 /* some broken boards return 0 or ~0 if a slot is empty: */ 2047 /* Some broken boards return 0 or ~0 if a slot is empty: */
1998 if (*l == 0xffffffff || *l == 0x00000000 || 2048 if (*l == 0xffffffff || *l == 0x00000000 ||
1999 *l == 0x0000ffff || *l == 0xffff0000) 2049 *l == 0x0000ffff || *l == 0xffff0000)
2000 return false; 2050 return false;
@@ -2007,8 +2057,8 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2007EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); 2057EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
2008 2058
2009/* 2059/*
2010 * Read the config data for a PCI device, sanity-check it 2060 * Read the config data for a PCI device, sanity-check it,
2011 * and fill in the dev structure... 2061 * and fill in the dev structure.
2012 */ 2062 */
2013static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 2063static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2014{ 2064{
@@ -2074,7 +2124,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
2074} 2124}
2075 2125
2076/* 2126/*
2077 * This is the equivalent of pci_host_bridge_msi_domain that acts on 2127 * This is the equivalent of pci_host_bridge_msi_domain() that acts on
2078 * devices. Firmware interfaces that can select the MSI domain on a 2128 * devices. Firmware interfaces that can select the MSI domain on a
2079 * per-device basis should be called from here. 2129 * per-device basis should be called from here.
2080 */ 2130 */
@@ -2083,7 +2133,7 @@ static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2083 struct irq_domain *d; 2133 struct irq_domain *d;
2084 2134
2085 /* 2135 /*
2086 * If a domain has been set through the pcibios_add_device 2136 * If a domain has been set through the pcibios_add_device()
2087 * callback, then this is the one (platform code knows best). 2137 * callback, then this is the one (platform code knows best).
2088 */ 2138 */
2089 d = dev_get_msi_domain(&dev->dev); 2139 d = dev_get_msi_domain(&dev->dev);
@@ -2137,10 +2187,10 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2137 /* Fix up broken headers */ 2187 /* Fix up broken headers */
2138 pci_fixup_device(pci_fixup_header, dev); 2188 pci_fixup_device(pci_fixup_header, dev);
2139 2189
2140 /* moved out from quirk header fixup code */ 2190 /* Moved out from quirk header fixup code */
2141 pci_reassigndev_resource_alignment(dev); 2191 pci_reassigndev_resource_alignment(dev);
2142 2192
2143 /* Clear the state_saved flag. */ 2193 /* Clear the state_saved flag */
2144 dev->state_saved = false; 2194 dev->state_saved = false;
2145 2195
2146 /* Initialize various capabilities */ 2196 /* Initialize various capabilities */
@@ -2157,7 +2207,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2157 ret = pcibios_add_device(dev); 2207 ret = pcibios_add_device(dev);
2158 WARN_ON(ret < 0); 2208 WARN_ON(ret < 0);
2159 2209
2160 /* Setup MSI irq domain */ 2210 /* Set up MSI IRQ domain */
2161 pci_set_msi_domain(dev); 2211 pci_set_msi_domain(dev);
2162 2212
2163 /* Notifier could use PCI capabilities */ 2213 /* Notifier could use PCI capabilities */
@@ -2216,29 +2266,34 @@ static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2216 2266
2217static int only_one_child(struct pci_bus *bus) 2267static int only_one_child(struct pci_bus *bus)
2218{ 2268{
2219 struct pci_dev *parent = bus->self; 2269 struct pci_dev *bridge = bus->self;
2220 2270
2221 if (!parent || !pci_is_pcie(parent)) 2271 /*
2272 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2273 * we scan for all possible devices, not just Device 0.
2274 */
2275 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2222 return 0; 2276 return 0;
2223 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2224 return 1;
2225 2277
2226 /* 2278 /*
2227 * PCIe downstream ports are bridges that normally lead to only a 2279 * A PCIe Downstream Port normally leads to a Link with only Device
2228 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all 2280 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2229 * possible devices, not just device 0. See PCIe spec r3.0, 2281 * only for Device 0 in that situation.
2230 * sec 7.3.1. 2282 *
2283 * Checking has_secondary_link is a hack to identify Downstream
2284 * Ports because sometimes Switches are configured such that the
2285 * PCIe Port Type labels are backwards.
2231 */ 2286 */
2232 if (parent->has_secondary_link && 2287 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
2233 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2234 return 1; 2288 return 1;
2289
2235 return 0; 2290 return 0;
2236} 2291}
2237 2292
2238/** 2293/**
2239 * pci_scan_slot - scan a PCI slot on a bus for devices. 2294 * pci_scan_slot - Scan a PCI slot on a bus for devices
2240 * @bus: PCI bus to scan 2295 * @bus: PCI bus to scan
2241 * @devfn: slot number to scan (must have zero function.) 2296 * @devfn: slot number to scan (must have zero function)
2242 * 2297 *
2243 * Scan a PCI slot on the specified PCI bus for devices, adding 2298 * Scan a PCI slot on the specified PCI bus for devices, adding
2244 * discovered devices to the @bus->devices list. New devices 2299 * discovered devices to the @bus->devices list. New devices
@@ -2269,7 +2324,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
2269 } 2324 }
2270 } 2325 }
2271 2326
2272 /* only one slot has pcie device */ 2327 /* Only one slot has PCIe device */
2273 if (bus->self && nr) 2328 if (bus->self && nr)
2274 pcie_aspm_init_link_state(bus->self); 2329 pcie_aspm_init_link_state(bus->self);
2275 2330
@@ -2318,7 +2373,9 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
2318 2373
2319 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT && 2374 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2320 dev->bus->self) 2375 dev->bus->self)
2321 /* For "Performance", the assumption is made that 2376
2377 /*
2378 * For "Performance", the assumption is made that
2322 * downstream communication will never be larger than 2379 * downstream communication will never be larger than
2323 * the MRRS. So, the MPS only needs to be configured 2380 * the MRRS. So, the MPS only needs to be configured
2324 * for the upstream communication. This being the case, 2381 * for the upstream communication. This being the case,
@@ -2335,27 +2392,30 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
2335 2392
2336 rc = pcie_set_mps(dev, mps); 2393 rc = pcie_set_mps(dev, mps);
2337 if (rc) 2394 if (rc)
2338 dev_err(&dev->dev, "Failed attempting to set the MPS\n"); 2395 pci_err(dev, "Failed attempting to set the MPS\n");
2339} 2396}
2340 2397
2341static void pcie_write_mrrs(struct pci_dev *dev) 2398static void pcie_write_mrrs(struct pci_dev *dev)
2342{ 2399{
2343 int rc, mrrs; 2400 int rc, mrrs;
2344 2401
2345 /* In the "safe" case, do not configure the MRRS. There appear to be 2402 /*
2403 * In the "safe" case, do not configure the MRRS. There appear to be
2346 * issues with setting MRRS to 0 on a number of devices. 2404 * issues with setting MRRS to 0 on a number of devices.
2347 */ 2405 */
2348 if (pcie_bus_config != PCIE_BUS_PERFORMANCE) 2406 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2349 return; 2407 return;
2350 2408
2351 /* For Max performance, the MRRS must be set to the largest supported 2409 /*
2410 * For max performance, the MRRS must be set to the largest supported
2352 * value. However, it cannot be configured larger than the MPS the 2411 * value. However, it cannot be configured larger than the MPS the
2353 * device or the bus can support. This should already be properly 2412 * device or the bus can support. This should already be properly
2354 * configured by a prior call to pcie_write_mps. 2413 * configured by a prior call to pcie_write_mps().
2355 */ 2414 */
2356 mrrs = pcie_get_mps(dev); 2415 mrrs = pcie_get_mps(dev);
2357 2416
2358 /* MRRS is a R/W register. Invalid values can be written, but a 2417 /*
2418 * MRRS is a R/W register. Invalid values can be written, but a
2359 * subsequent read will verify if the value is acceptable or not. 2419 * subsequent read will verify if the value is acceptable or not.
2360 * If the MRRS value provided is not acceptable (e.g., too large), 2420 * If the MRRS value provided is not acceptable (e.g., too large),
2361 * shrink the value until it is acceptable to the HW. 2421 * shrink the value until it is acceptable to the HW.
@@ -2365,12 +2425,12 @@ static void pcie_write_mrrs(struct pci_dev *dev)
2365 if (!rc) 2425 if (!rc)
2366 break; 2426 break;
2367 2427
2368 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n"); 2428 pci_warn(dev, "Failed attempting to set the MRRS\n");
2369 mrrs /= 2; 2429 mrrs /= 2;
2370 } 2430 }
2371 2431
2372 if (mrrs < 128) 2432 if (mrrs < 128)
2373 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n"); 2433 pci_err(dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
2374} 2434}
2375 2435
2376static int pcie_bus_configure_set(struct pci_dev *dev, void *data) 2436static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
@@ -2390,14 +2450,15 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2390 pcie_write_mps(dev, mps); 2450 pcie_write_mps(dev, mps);
2391 pcie_write_mrrs(dev); 2451 pcie_write_mrrs(dev);
2392 2452
2393 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n", 2453 pci_info(dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2394 pcie_get_mps(dev), 128 << dev->pcie_mpss, 2454 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2395 orig_mps, pcie_get_readrq(dev)); 2455 orig_mps, pcie_get_readrq(dev));
2396 2456
2397 return 0; 2457 return 0;
2398} 2458}
2399 2459
2400/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, 2460/*
2461 * pcie_bus_configure_settings() requires that pci_walk_bus work in a top-down,
2401 * parents then children fashion. If this changes, then this code will not 2462 * parents then children fashion. If this changes, then this code will not
2402 * work as designed. 2463 * work as designed.
2403 */ 2464 */
@@ -2411,7 +2472,8 @@ void pcie_bus_configure_settings(struct pci_bus *bus)
2411 if (!pci_is_pcie(bus->self)) 2472 if (!pci_is_pcie(bus->self))
2412 return; 2473 return;
2413 2474
2414 /* FIXME - Peer to peer DMA is possible, though the endpoint would need 2475 /*
2476 * FIXME - Peer to peer DMA is possible, though the endpoint would need
2415 * to be aware of the MPS of the destination. To work around this, 2477 * to be aware of the MPS of the destination. To work around this,
2416 * simply force the MPS of the entire system to the smallest possible. 2478 * simply force the MPS of the entire system to the smallest possible.
2417 */ 2479 */
@@ -2465,7 +2527,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2465 for (devfn = 0; devfn < 0x100; devfn += 8) 2527 for (devfn = 0; devfn < 0x100; devfn += 8)
2466 pci_scan_slot(bus, devfn); 2528 pci_scan_slot(bus, devfn);
2467 2529
2468 /* Reserve buses for SR-IOV capability. */ 2530 /* Reserve buses for SR-IOV capability */
2469 used_buses = pci_iov_bus_range(bus); 2531 used_buses = pci_iov_bus_range(bus);
2470 max += used_buses; 2532 max += used_buses;
2471 2533
@@ -2507,6 +2569,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2507 unsigned int buses = 0; 2569 unsigned int buses = 0;
2508 2570
2509 if (!hotplug_bridges && normal_bridges == 1) { 2571 if (!hotplug_bridges && normal_bridges == 1) {
2572
2510 /* 2573 /*
2511 * There is only one bridge on the bus (upstream 2574 * There is only one bridge on the bus (upstream
2512 * port) so it gets all available buses which it 2575 * port) so it gets all available buses which it
@@ -2515,6 +2578,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2515 */ 2578 */
2516 buses = available_buses; 2579 buses = available_buses;
2517 } else if (dev->is_hotplug_bridge) { 2580 } else if (dev->is_hotplug_bridge) {
2581
2518 /* 2582 /*
2519 * Distribute the extra buses between hotplug 2583 * Distribute the extra buses between hotplug
2520 * bridges if any. 2584 * bridges if any.
@@ -2573,8 +2637,8 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
2573EXPORT_SYMBOL_GPL(pci_scan_child_bus); 2637EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2574 2638
2575/** 2639/**
2576 * pcibios_root_bridge_prepare - Platform-specific host bridge setup. 2640 * pcibios_root_bridge_prepare - Platform-specific host bridge setup
2577 * @bridge: Host bridge to set up. 2641 * @bridge: Host bridge to set up
2578 * 2642 *
2579 * Default empty implementation. Replace with an architecture-specific setup 2643 * Default empty implementation. Replace with an architecture-specific setup
2580 * routine, if necessary. 2644 * routine, if necessary.
@@ -2621,6 +2685,39 @@ err_out:
2621} 2685}
2622EXPORT_SYMBOL_GPL(pci_create_root_bus); 2686EXPORT_SYMBOL_GPL(pci_create_root_bus);
2623 2687
2688int pci_host_probe(struct pci_host_bridge *bridge)
2689{
2690 struct pci_bus *bus, *child;
2691 int ret;
2692
2693 ret = pci_scan_root_bus_bridge(bridge);
2694 if (ret < 0) {
2695 dev_err(bridge->dev.parent, "Scanning root bridge failed");
2696 return ret;
2697 }
2698
2699 bus = bridge->bus;
2700
2701 /*
2702 * We insert PCI resources into the iomem_resource and
2703 * ioport_resource trees in either pci_bus_claim_resources()
2704 * or pci_bus_assign_resources().
2705 */
2706 if (pci_has_flag(PCI_PROBE_ONLY)) {
2707 pci_bus_claim_resources(bus);
2708 } else {
2709 pci_bus_size_bridges(bus);
2710 pci_bus_assign_resources(bus);
2711
2712 list_for_each_entry(child, &bus->children, node)
2713 pcie_bus_configure_settings(child);
2714 }
2715
2716 pci_bus_add_devices(bus);
2717 return 0;
2718}
2719EXPORT_SYMBOL_GPL(pci_host_probe);
2720
2624int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) 2721int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2625{ 2722{
2626 struct resource *res = &b->busn_res; 2723 struct resource *res = &b->busn_res;
@@ -2777,7 +2874,7 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2777EXPORT_SYMBOL(pci_scan_bus); 2874EXPORT_SYMBOL(pci_scan_bus);
2778 2875
2779/** 2876/**
2780 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices. 2877 * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices
2781 * @bridge: PCI bridge for the bus to scan 2878 * @bridge: PCI bridge for the bus to scan
2782 * 2879 *
2783 * Scan a PCI bus and child buses for new devices, add them, 2880 * Scan a PCI bus and child buses for new devices, add them,
@@ -2802,11 +2899,11 @@ unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2802} 2899}
2803 2900
2804/** 2901/**
2805 * pci_rescan_bus - scan a PCI bus for devices. 2902 * pci_rescan_bus - Scan a PCI bus for devices
2806 * @bus: PCI bus to scan 2903 * @bus: PCI bus to scan
2807 * 2904 *
2808 * Scan a PCI bus and child buses for new devices, adds them, 2905 * Scan a PCI bus and child buses for new devices, add them,
2809 * and enables them. 2906 * and enable them.
2810 * 2907 *
2811 * Returns the max number of subordinate bus discovered. 2908 * Returns the max number of subordinate bus discovered.
2812 */ 2909 */
@@ -2875,7 +2972,7 @@ int pci_hp_add_bridge(struct pci_dev *dev)
2875 break; 2972 break;
2876 } 2973 }
2877 if (busnr-- > end) { 2974 if (busnr-- > end) {
2878 dev_err(&dev->dev, "No bus number available for hot-added bridge\n"); 2975 pci_err(dev, "No bus number available for hot-added bridge\n");
2879 return -1; 2976 return -1;
2880 } 2977 }
2881 2978
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 10684b17d0bd..fc734014206f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -19,7 +19,6 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/acpi.h> 21#include <linux/acpi.h>
22#include <linux/kallsyms.h>
23#include <linux/dmi.h> 22#include <linux/dmi.h>
24#include <linux/pci-aspm.h> 23#include <linux/pci-aspm.h>
25#include <linux/ioport.h> 24#include <linux/ioport.h>
@@ -66,7 +65,7 @@ static void quirk_passive_release(struct pci_dev *dev)
66 while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { 65 while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
67 pci_read_config_byte(d, 0x82, &dlc); 66 pci_read_config_byte(d, 0x82, &dlc);
68 if (!(dlc & 1<<1)) { 67 if (!(dlc & 1<<1)) {
69 dev_info(&d->dev, "PIIX3: Enabling Passive Release\n"); 68 pci_info(d, "PIIX3: Enabling Passive Release\n");
70 dlc |= 1<<1; 69 dlc |= 1<<1;
71 pci_write_config_byte(d, 0x82, dlc); 70 pci_write_config_byte(d, 0x82, dlc);
72 } 71 }
@@ -86,7 +85,7 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
86{ 85{
87 if (!isa_dma_bridge_buggy) { 86 if (!isa_dma_bridge_buggy) {
88 isa_dma_bridge_buggy = 1; 87 isa_dma_bridge_buggy = 1;
89 dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n"); 88 pci_info(dev, "Activating ISA DMA hang workarounds\n");
90 } 89 }
91} 90}
92 /* 91 /*
@@ -115,7 +114,7 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
115 pm1a = inw(pmbase); 114 pm1a = inw(pmbase);
116 115
117 if (pm1a & 0x10) { 116 if (pm1a & 0x10) {
118 dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); 117 pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
119 outw(0x10, pmbase); 118 outw(0x10, pmbase);
120 } 119 }
121} 120}
@@ -127,7 +126,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk
127static void quirk_nopcipci(struct pci_dev *dev) 126static void quirk_nopcipci(struct pci_dev *dev)
128{ 127{
129 if ((pci_pci_problems & PCIPCI_FAIL) == 0) { 128 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
130 dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n"); 129 pci_info(dev, "Disabling direct PCI/PCI transfers\n");
131 pci_pci_problems |= PCIPCI_FAIL; 130 pci_pci_problems |= PCIPCI_FAIL;
132 } 131 }
133} 132}
@@ -140,7 +139,7 @@ static void quirk_nopciamd(struct pci_dev *dev)
140 pci_read_config_byte(dev, 0x08, &rev); 139 pci_read_config_byte(dev, 0x08, &rev);
141 if (rev == 0x13) { 140 if (rev == 0x13) {
142 /* Erratum 24 */ 141 /* Erratum 24 */
143 dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n"); 142 pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
144 pci_pci_problems |= PCIAGP_FAIL; 143 pci_pci_problems |= PCIAGP_FAIL;
145 } 144 }
146} 145}
@@ -152,7 +151,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopci
152static void quirk_triton(struct pci_dev *dev) 151static void quirk_triton(struct pci_dev *dev)
153{ 152{
154 if ((pci_pci_problems&PCIPCI_TRITON) == 0) { 153 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
155 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); 154 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
156 pci_pci_problems |= PCIPCI_TRITON; 155 pci_pci_problems |= PCIPCI_TRITON;
157 } 156 }
158} 157}
@@ -212,7 +211,7 @@ static void quirk_vialatency(struct pci_dev *dev)
212 busarb &= ~(1<<5); 211 busarb &= ~(1<<5);
213 busarb |= (1<<4); 212 busarb |= (1<<4);
214 pci_write_config_byte(dev, 0x76, busarb); 213 pci_write_config_byte(dev, 0x76, busarb);
215 dev_info(&dev->dev, "Applying VIA southbridge workaround\n"); 214 pci_info(dev, "Applying VIA southbridge workaround\n");
216exit: 215exit:
217 pci_dev_put(p); 216 pci_dev_put(p);
218} 217}
@@ -230,7 +229,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_viala
230static void quirk_viaetbf(struct pci_dev *dev) 229static void quirk_viaetbf(struct pci_dev *dev)
231{ 230{
232 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) { 231 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
233 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); 232 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
234 pci_pci_problems |= PCIPCI_VIAETBF; 233 pci_pci_problems |= PCIPCI_VIAETBF;
235 } 234 }
236} 235}
@@ -239,7 +238,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_via
239static void quirk_vsfx(struct pci_dev *dev) 238static void quirk_vsfx(struct pci_dev *dev)
240{ 239{
241 if ((pci_pci_problems&PCIPCI_VSFX) == 0) { 240 if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
242 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); 241 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
243 pci_pci_problems |= PCIPCI_VSFX; 242 pci_pci_problems |= PCIPCI_VSFX;
244 } 243 }
245} 244}
@@ -254,7 +253,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
254static void quirk_alimagik(struct pci_dev *dev) 253static void quirk_alimagik(struct pci_dev *dev)
255{ 254{
256 if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) { 255 if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
257 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); 256 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
258 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; 257 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
259 } 258 }
260} 259}
@@ -268,7 +267,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagi
268static void quirk_natoma(struct pci_dev *dev) 267static void quirk_natoma(struct pci_dev *dev)
269{ 268{
270 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) { 269 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
271 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); 270 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
272 pci_pci_problems |= PCIPCI_NATOMA; 271 pci_pci_problems |= PCIPCI_NATOMA;
273 } 272 }
274} 273}
@@ -313,7 +312,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
313 r->end = PAGE_SIZE - 1; 312 r->end = PAGE_SIZE - 1;
314 r->start = 0; 313 r->start = 0;
315 r->flags |= IORESOURCE_UNSET; 314 r->flags |= IORESOURCE_UNSET;
316 dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n", 315 pci_info(dev, "expanded BAR %d to page size: %pR\n",
317 i, r); 316 i, r);
318 } 317 }
319 } 318 }
@@ -360,7 +359,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
360 bus_region.end = region + size - 1; 359 bus_region.end = region + size - 1;
361 pcibios_bus_to_resource(dev->bus, res, &bus_region); 360 pcibios_bus_to_resource(dev->bus, res, &bus_region);
362 361
363 dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", 362 pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
364 name, PCI_BASE_ADDRESS_0 + (pos << 2), res); 363 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
365} 364}
366 365
@@ -381,7 +380,7 @@ static void quirk_cs5536_vsa(struct pci_dev *dev)
381 quirk_io(dev, 0, 8, name); /* SMB */ 380 quirk_io(dev, 0, 8, name); /* SMB */
382 quirk_io(dev, 1, 256, name); /* GPIO */ 381 quirk_io(dev, 1, 256, name); /* GPIO */
383 quirk_io(dev, 2, 64, name); /* MFGPT */ 382 quirk_io(dev, 2, 64, name); /* MFGPT */
384 dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", 383 pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
385 name); 384 name);
386 } 385 }
387} 386}
@@ -409,7 +408,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
409 pcibios_bus_to_resource(dev->bus, res, &bus_region); 408 pcibios_bus_to_resource(dev->bus, res, &bus_region);
410 409
411 if (!pci_claim_resource(dev, nr)) 410 if (!pci_claim_resource(dev, nr))
412 dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); 411 pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
413} 412}
414 413
415/* 414/*
@@ -418,7 +417,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
418 */ 417 */
419static void quirk_ati_exploding_mce(struct pci_dev *dev) 418static void quirk_ati_exploding_mce(struct pci_dev *dev)
420{ 419{
421 dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n"); 420 pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
422 /* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */ 421 /* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
423 request_region(0x3b0, 0x0C, "RadeonIGP"); 422 request_region(0x3b0, 0x0C, "RadeonIGP");
424 request_region(0x3d3, 0x01, "RadeonIGP"); 423 request_region(0x3d3, 0x01, "RadeonIGP");
@@ -441,7 +440,7 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
441 440
442 /* Use "USB Device (not host controller)" class */ 441 /* Use "USB Device (not host controller)" class */
443 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; 442 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
444 dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", 443 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
445 class, pdev->class); 444 class, pdev->class);
446} 445}
447DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB, 446DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
@@ -488,8 +487,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
488 * let's get enough confirmation reports first. 487 * let's get enough confirmation reports first.
489 */ 488 */
490 base &= -size; 489 base &= -size;
491 dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, 490 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
492 base + size - 1);
493} 491}
494 492
495static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) 493static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
@@ -514,8 +512,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
514 * reserve it, but let's get enough confirmation reports first. 512 * reserve it, but let's get enough confirmation reports first.
515 */ 513 */
516 base &= -size; 514 base &= -size;
517 dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, 515 pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
518 base + size - 1);
519} 516}
520 517
521/* 518/*
@@ -644,7 +641,7 @@ static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
644 base &= ~(size-1); 641 base &= ~(size-1);
645 642
646 /* Just print it out for now. We should reserve it after more debugging */ 643 /* Just print it out for now. We should reserve it after more debugging */
647 dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); 644 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
648} 645}
649 646
650static void quirk_ich6_lpc(struct pci_dev *dev) 647static void quirk_ich6_lpc(struct pci_dev *dev)
@@ -679,7 +676,7 @@ static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
679 mask |= 3; 676 mask |= 3;
680 677
681 /* Just print it out for now. We should reserve it after more debugging */ 678 /* Just print it out for now. We should reserve it after more debugging */
682 dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); 679 pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
683} 680}
684 681
685/* ICH7-10 has the same common LPC generic IO decode registers */ 682/* ICH7-10 has the same common LPC generic IO decode registers */
@@ -758,7 +755,7 @@ static void quirk_xio2000a(struct pci_dev *dev)
758 struct pci_dev *pdev; 755 struct pci_dev *pdev;
759 u16 command; 756 u16 command;
760 757
761 dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n"); 758 pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
762 list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { 759 list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
763 pci_read_config_word(pdev, PCI_COMMAND, &command); 760 pci_read_config_word(pdev, PCI_COMMAND, &command);
764 if (command & PCI_COMMAND_FAST_BACK) 761 if (command & PCI_COMMAND_FAST_BACK)
@@ -788,7 +785,7 @@ static void quirk_via_ioapic(struct pci_dev *dev)
788 else 785 else
789 tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ 786 tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
790 787
791 dev_info(&dev->dev, "%sbling VIA external APIC routing\n", 788 pci_info(dev, "%sbling VIA external APIC routing\n",
792 tmp == 0 ? "Disa" : "Ena"); 789 tmp == 0 ? "Disa" : "Ena");
793 790
794 /* Offset 0x58: External APIC IRQ output control */ 791 /* Offset 0x58: External APIC IRQ output control */
@@ -810,7 +807,7 @@ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
810 807
811 pci_read_config_byte(dev, 0x5B, &misc_control2); 808 pci_read_config_byte(dev, 0x5B, &misc_control2);
812 if (!(misc_control2 & BYPASS_APIC_DEASSERT)) { 809 if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
813 dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n"); 810 pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
814 pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT); 811 pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
815 } 812 }
816} 813}
@@ -829,8 +826,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk
829static void quirk_amd_ioapic(struct pci_dev *dev) 826static void quirk_amd_ioapic(struct pci_dev *dev)
830{ 827{
831 if (dev->revision >= 0x02) { 828 if (dev->revision >= 0x02) {
832 dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n"); 829 pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
833 dev_warn(&dev->dev, " : booting with the \"noapic\" option\n"); 830 pci_warn(dev, " : booting with the \"noapic\" option\n");
834 } 831 }
835} 832}
836DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); 833DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
@@ -854,7 +851,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_lin
854static void quirk_amd_8131_mmrbc(struct pci_dev *dev) 851static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
855{ 852{
856 if (dev->subordinate && dev->revision <= 0x12) { 853 if (dev->subordinate && dev->revision <= 0x12) {
857 dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n", 854 pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
858 dev->revision); 855 dev->revision);
859 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC; 856 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
860 } 857 }
@@ -962,7 +959,7 @@ static void quirk_via_vlink(struct pci_dev *dev)
962 959
963 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); 960 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
964 if (new_irq != irq) { 961 if (new_irq != irq) {
965 dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n", 962 pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
966 irq, new_irq); 963 irq, new_irq);
967 udelay(15); /* unknown if delay really needed */ 964 udelay(15); /* unknown if delay really needed */
968 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); 965 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
@@ -1011,7 +1008,7 @@ static void quirk_amd_ordering(struct pci_dev *dev)
1011 pci_read_config_dword(dev, 0x4C, &pcic); 1008 pci_read_config_dword(dev, 0x4C, &pcic);
1012 if ((pcic & 6) != 6) { 1009 if ((pcic & 6) != 6) {
1013 pcic |= 6; 1010 pcic |= 6;
1014 dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n"); 1011 pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
1015 pci_write_config_dword(dev, 0x4C, pcic); 1012 pci_write_config_dword(dev, 0x4C, pcic);
1016 pci_read_config_dword(dev, 0x84, &pcic); 1013 pci_read_config_dword(dev, 0x84, &pcic);
1017 pcic |= (1 << 23); /* Required in this mode */ 1014 pcic |= (1 << 23); /* Required in this mode */
@@ -1064,7 +1061,7 @@ static void quirk_mediagx_master(struct pci_dev *dev)
1064 pci_read_config_byte(dev, 0x41, &reg); 1061 pci_read_config_byte(dev, 0x41, &reg);
1065 if (reg & 2) { 1062 if (reg & 2) {
1066 reg &= ~2; 1063 reg &= ~2;
1067 dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", 1064 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1068 reg); 1065 reg);
1069 pci_write_config_byte(dev, 0x41, reg); 1066 pci_write_config_byte(dev, 0x41, reg);
1070 } 1067 }
@@ -1087,7 +1084,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
1087 if (config & (1<<6)) { 1084 if (config & (1<<6)) {
1088 config &= ~(1<<6); 1085 config &= ~(1<<6);
1089 pci_write_config_word(pdev, 0x40, config); 1086 pci_write_config_word(pdev, 0x40, config);
1090 dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n"); 1087 pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
1091 } 1088 }
1092} 1089}
1093DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 1090DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
@@ -1107,7 +1104,7 @@ static void quirk_amd_ide_mode(struct pci_dev *pdev)
1107 pci_write_config_byte(pdev, 0x40, tmp); 1104 pci_write_config_byte(pdev, 0x40, tmp);
1108 1105
1109 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI; 1106 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
1110 dev_info(&pdev->dev, "set SATA to AHCI mode\n"); 1107 pci_info(pdev, "set SATA to AHCI mode\n");
1111 } 1108 }
1112} 1109}
1113DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 1110DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
@@ -1145,7 +1142,7 @@ static void quirk_ide_samemode(struct pci_dev *pdev)
1145 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); 1142 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1146 1143
1147 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) { 1144 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1148 dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n"); 1145 pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
1149 prog &= ~5; 1146 prog &= ~5;
1150 pdev->class &= ~5; 1147 pdev->class &= ~5;
1151 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); 1148 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
@@ -1356,10 +1353,10 @@ static void asus_hides_smbus_lpc(struct pci_dev *dev)
1356 pci_write_config_word(dev, 0xF2, val & (~0x8)); 1353 pci_write_config_word(dev, 0xF2, val & (~0x8));
1357 pci_read_config_word(dev, 0xF2, &val); 1354 pci_read_config_word(dev, 0xF2, &val);
1358 if (val & 0x8) 1355 if (val & 0x8)
1359 dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n", 1356 pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1360 val); 1357 val);
1361 else 1358 else
1362 dev_info(&dev->dev, "Enabled i801 SMBus device\n"); 1359 pci_info(dev, "Enabled i801 SMBus device\n");
1363 } 1360 }
1364} 1361}
1365DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); 1362DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
@@ -1411,7 +1408,7 @@ static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1411 return; 1408 return;
1412 iounmap(asus_rcba_base); 1409 iounmap(asus_rcba_base);
1413 asus_rcba_base = NULL; 1410 asus_rcba_base = NULL;
1414 dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); 1411 pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
1415} 1412}
1416 1413
1417static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) 1414static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
@@ -1433,7 +1430,7 @@ static void quirk_sis_96x_smbus(struct pci_dev *dev)
1433 u8 val = 0; 1430 u8 val = 0;
1434 pci_read_config_byte(dev, 0x77, &val); 1431 pci_read_config_byte(dev, 0x77, &val);
1435 if (val & 0x10) { 1432 if (val & 0x10) {
1436 dev_info(&dev->dev, "Enabling SiS 96x SMBus\n"); 1433 pci_info(dev, "Enabling SiS 96x SMBus\n");
1437 pci_write_config_byte(dev, 0x77, val & ~0x10); 1434 pci_write_config_byte(dev, 0x77, val & ~0x10);
1438 } 1435 }
1439} 1436}
@@ -1505,10 +1502,10 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
1505 pci_write_config_byte(dev, 0x50, val & (~0xc0)); 1502 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1506 pci_read_config_byte(dev, 0x50, &val); 1503 pci_read_config_byte(dev, 0x50, &val);
1507 if (val & 0xc0) 1504 if (val & 0xc0)
1508 dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", 1505 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1509 val); 1506 val);
1510 else 1507 else
1511 dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n"); 1508 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1512 } 1509 }
1513} 1510}
1514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); 1511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
@@ -1599,7 +1596,7 @@ static void quirk_jmicron_async_suspend(struct pci_dev *dev)
1599{ 1596{
1600 if (dev->multifunction) { 1597 if (dev->multifunction) {
1601 device_disable_async_suspend(&dev->dev); 1598 device_disable_async_suspend(&dev->dev);
1602 dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n"); 1599 pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
1603 } 1600 }
1604} 1601}
1605DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend); 1602DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
@@ -1636,8 +1633,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev)
1636DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); 1633DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
1637DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); 1634DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
1638DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); 1635DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
1639DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
1640 1636
1637DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1641 1638
1642/* 1639/*
1643 * It's possible for the MSI to get corrupted if shpc and acpi 1640 * It's possible for the MSI to get corrupted if shpc and acpi
@@ -1646,7 +1643,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
1646static void quirk_pcie_pxh(struct pci_dev *dev) 1643static void quirk_pcie_pxh(struct pci_dev *dev)
1647{ 1644{
1648 dev->no_msi = 1; 1645 dev->no_msi = 1;
1649 dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n"); 1646 pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
1650} 1647}
1651DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh); 1648DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
1652DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh); 1649DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
@@ -1692,7 +1689,7 @@ static void quirk_radeon_pm(struct pci_dev *dev)
1692 dev->subsystem_device == 0x00e2) { 1689 dev->subsystem_device == 0x00e2) {
1693 if (dev->d3_delay < 20) { 1690 if (dev->d3_delay < 20) {
1694 dev->d3_delay = 20; 1691 dev->d3_delay = 20;
1695 dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n", 1692 pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
1696 dev->d3_delay); 1693 dev->d3_delay);
1697 } 1694 }
1698 } 1695 }
@@ -1736,7 +1733,7 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
1736 return; 1733 return;
1737 1734
1738 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; 1735 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
1739 dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n", 1736 pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
1740 dev->vendor, dev->device); 1737 dev->vendor, dev->device);
1741} 1738}
1742DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); 1739DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
@@ -1779,7 +1776,7 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1779 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; 1776 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
1780 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); 1777 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
1781 1778
1782 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1779 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
1783 dev->vendor, dev->device); 1780 dev->vendor, dev->device);
1784} 1781}
1785DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1782DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
@@ -1812,7 +1809,7 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
1812 1809
1813 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); 1810 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
1814 1811
1815 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1812 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
1816 dev->vendor, dev->device); 1813 dev->vendor, dev->device);
1817} 1814}
1818DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1815DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
@@ -1845,7 +1842,7 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1845 pci_config_dword &= ~AMD_813X_NOIOAMODE; 1842 pci_config_dword &= ~AMD_813X_NOIOAMODE;
1846 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); 1843 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
1847 1844
1848 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1845 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
1849 dev->vendor, dev->device); 1846 dev->vendor, dev->device);
1850} 1847}
1851DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); 1848DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
@@ -1864,12 +1861,12 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
1864 1861
1865 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); 1862 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
1866 if (!pci_config_word) { 1863 if (!pci_config_word) {
1867 dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n", 1864 pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
1868 dev->vendor, dev->device); 1865 dev->vendor, dev->device);
1869 return; 1866 return;
1870 } 1867 }
1871 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); 1868 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
1872 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1869 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
1873 dev->vendor, dev->device); 1870 dev->vendor, dev->device);
1874} 1871}
1875DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); 1872DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
@@ -1913,7 +1910,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev)
1913 if (pci_resource_len(dev, bar) == 0x80 && 1910 if (pci_resource_len(dev, bar) == 0x80 &&
1914 (pci_resource_start(dev, bar) & 0x80)) { 1911 (pci_resource_start(dev, bar) & 0x80)) {
1915 struct resource *r = &dev->resource[bar]; 1912 struct resource *r = &dev->resource[bar];
1916 dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n", 1913 pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
1917 bar); 1914 bar);
1918 r->flags |= IORESOURCE_UNSET; 1915 r->flags |= IORESOURCE_UNSET;
1919 r->start = 0; 1916 r->start = 0;
@@ -1960,7 +1957,7 @@ static void quirk_netmos(struct pci_dev *dev)
1960 case PCI_DEVICE_ID_NETMOS_9845: 1957 case PCI_DEVICE_ID_NETMOS_9845:
1961 case PCI_DEVICE_ID_NETMOS_9855: 1958 case PCI_DEVICE_ID_NETMOS_9855:
1962 if (num_parallel) { 1959 if (num_parallel) {
1963 dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n", 1960 pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
1964 dev->device, num_parallel, num_serial); 1961 dev->device, num_parallel, num_serial);
1965 dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) | 1962 dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
1966 (dev->class & 0xff); 1963 (dev->class & 0xff);
@@ -2046,13 +2043,13 @@ static void quirk_e100_interrupt(struct pci_dev *dev)
2046 /* Convert from PCI bus to resource space. */ 2043 /* Convert from PCI bus to resource space. */
2047 csr = ioremap(pci_resource_start(dev, 0), 8); 2044 csr = ioremap(pci_resource_start(dev, 0), 8);
2048 if (!csr) { 2045 if (!csr) {
2049 dev_warn(&dev->dev, "Can't map e100 registers\n"); 2046 pci_warn(dev, "Can't map e100 registers\n");
2050 return; 2047 return;
2051 } 2048 }
2052 2049
2053 cmd_hi = readb(csr + 3); 2050 cmd_hi = readb(csr + 3);
2054 if (cmd_hi == 0) { 2051 if (cmd_hi == 0) {
2055 dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n"); 2052 pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
2056 writeb(1, csr + 3); 2053 writeb(1, csr + 3);
2057 } 2054 }
2058 2055
@@ -2067,7 +2064,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
2067 */ 2064 */
2068static void quirk_disable_aspm_l0s(struct pci_dev *dev) 2065static void quirk_disable_aspm_l0s(struct pci_dev *dev)
2069{ 2066{
2070 dev_info(&dev->dev, "Disabling L0s\n"); 2067 pci_info(dev, "Disabling L0s\n");
2071 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S); 2068 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
2072} 2069}
2073DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s); 2070DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
@@ -2097,7 +2094,7 @@ static void fixup_rev1_53c810(struct pci_dev *dev)
2097 return; 2094 return;
2098 2095
2099 dev->class = PCI_CLASS_STORAGE_SCSI << 8; 2096 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2100 dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n", 2097 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2101 class, dev->class); 2098 class, dev->class);
2102} 2099}
2103DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); 2100DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
@@ -2110,7 +2107,7 @@ static void quirk_p64h2_1k_io(struct pci_dev *dev)
2110 pci_read_config_word(dev, 0x40, &en1k); 2107 pci_read_config_word(dev, 0x40, &en1k);
2111 2108
2112 if (en1k & 0x200) { 2109 if (en1k & 0x200) {
2113 dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n"); 2110 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2114 dev->io_window_1k = 1; 2111 dev->io_window_1k = 1;
2115 } 2112 }
2116} 2113}
@@ -2126,7 +2123,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
2126 if (pci_read_config_byte(dev, 0xf41, &b) == 0) { 2123 if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
2127 if (!(b & 0x20)) { 2124 if (!(b & 0x20)) {
2128 pci_write_config_byte(dev, 0xf41, b | 0x20); 2125 pci_write_config_byte(dev, 0xf41, b | 0x20);
2129 dev_info(&dev->dev, "Linking AER extended capability\n"); 2126 pci_info(dev, "Linking AER extended capability\n");
2130 } 2127 }
2131 } 2128 }
2132} 2129}
@@ -2164,7 +2161,7 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
2164 /* Turn off PCI Bus Parking */ 2161 /* Turn off PCI Bus Parking */
2165 pci_write_config_byte(dev, 0x76, b ^ 0x40); 2162 pci_write_config_byte(dev, 0x76, b ^ 0x40);
2166 2163
2167 dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n"); 2164 pci_info(dev, "Disabling VIA CX700 PCI parking\n");
2168 } 2165 }
2169 } 2166 }
2170 2167
@@ -2179,7 +2176,7 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
2179 /* Disable "Read FIFO Timer" */ 2176 /* Disable "Read FIFO Timer" */
2180 pci_write_config_byte(dev, 0x77, 0x0); 2177 pci_write_config_byte(dev, 0x77, 0x0);
2181 2178
2182 dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n"); 2179 pci_info(dev, "Disabling VIA CX700 PCI caching\n");
2183 } 2180 }
2184 } 2181 }
2185} 2182}
@@ -2196,7 +2193,7 @@ static void quirk_blacklist_vpd(struct pci_dev *dev)
2196{ 2193{
2197 if (dev->vpd) { 2194 if (dev->vpd) {
2198 dev->vpd->len = 0; 2195 dev->vpd->len = 0;
2199 dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n"); 2196 pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
2200 } 2197 }
2201} 2198}
2202 2199
@@ -2312,7 +2309,7 @@ static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2312 u8 reg; 2309 u8 reg;
2313 2310
2314 if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) { 2311 if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
2315 dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n"); 2312 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2316 pci_write_config_byte(dev, 0xF4, reg | 0x02); 2313 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2317 } 2314 }
2318} 2315}
@@ -2351,7 +2348,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
2351static void quirk_disable_all_msi(struct pci_dev *dev) 2348static void quirk_disable_all_msi(struct pci_dev *dev)
2352{ 2349{
2353 pci_no_msi(); 2350 pci_no_msi();
2354 dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n"); 2351 pci_warn(dev, "MSI quirk detected; MSI disabled\n");
2355} 2352}
2356DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi); 2353DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
2357DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi); 2354DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
@@ -2366,7 +2363,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2366static void quirk_disable_msi(struct pci_dev *dev) 2363static void quirk_disable_msi(struct pci_dev *dev)
2367{ 2364{
2368 if (dev->subordinate) { 2365 if (dev->subordinate) {
2369 dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n"); 2366 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2370 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; 2367 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2371 } 2368 }
2372} 2369}
@@ -2406,7 +2403,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
2406 2403
2407 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, 2404 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2408 &flags) == 0) { 2405 &flags) == 0) {
2409 dev_info(&dev->dev, "Found %s HT MSI Mapping\n", 2406 pci_info(dev, "Found %s HT MSI Mapping\n",
2410 flags & HT_MSI_FLAGS_ENABLE ? 2407 flags & HT_MSI_FLAGS_ENABLE ?
2411 "enabled" : "disabled"); 2408 "enabled" : "disabled");
2412 return (flags & HT_MSI_FLAGS_ENABLE) != 0; 2409 return (flags & HT_MSI_FLAGS_ENABLE) != 0;
@@ -2422,7 +2419,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
2422static void quirk_msi_ht_cap(struct pci_dev *dev) 2419static void quirk_msi_ht_cap(struct pci_dev *dev)
2423{ 2420{
2424 if (dev->subordinate && !msi_ht_cap_enabled(dev)) { 2421 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2425 dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n"); 2422 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2426 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; 2423 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2427 } 2424 }
2428} 2425}
@@ -2446,7 +2443,7 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2446 if (!pdev) 2443 if (!pdev)
2447 return; 2444 return;
2448 if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) { 2445 if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
2449 dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n"); 2446 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2450 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; 2447 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2451 } 2448 }
2452 pci_dev_put(pdev); 2449 pci_dev_put(pdev);
@@ -2465,7 +2462,7 @@ static void ht_enable_msi_mapping(struct pci_dev *dev)
2465 2462
2466 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, 2463 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2467 &flags) == 0) { 2464 &flags) == 0) {
2468 dev_info(&dev->dev, "Enabling HT MSI Mapping\n"); 2465 pci_info(dev, "Enabling HT MSI Mapping\n");
2469 2466
2470 pci_write_config_byte(dev, pos + HT_MSI_FLAGS, 2467 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2471 flags | HT_MSI_FLAGS_ENABLE); 2468 flags | HT_MSI_FLAGS_ENABLE);
@@ -2492,7 +2489,7 @@ static void nvenet_msi_disable(struct pci_dev *dev)
2492 if (board_name && 2489 if (board_name &&
2493 (strstr(board_name, "P5N32-SLI PREMIUM") || 2490 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2494 strstr(board_name, "P5N32-E SLI"))) { 2491 strstr(board_name, "P5N32-E SLI"))) {
2495 dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n"); 2492 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2496 dev->no_msi = 1; 2493 dev->no_msi = 1;
2497 } 2494 }
2498} 2495}
@@ -2669,7 +2666,7 @@ static void ht_disable_msi_mapping(struct pci_dev *dev)
2669 2666
2670 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, 2667 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2671 &flags) == 0) { 2668 &flags) == 0) {
2672 dev_info(&dev->dev, "Disabling HT MSI Mapping\n"); 2669 pci_info(dev, "Disabling HT MSI Mapping\n");
2673 2670
2674 pci_write_config_byte(dev, pos + HT_MSI_FLAGS, 2671 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2675 flags & ~HT_MSI_FLAGS_ENABLE); 2672 flags & ~HT_MSI_FLAGS_ENABLE);
@@ -2699,9 +2696,10 @@ static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2699 * HT MSI mapping should be disabled on devices that are below 2696 * HT MSI mapping should be disabled on devices that are below
2700 * a non-Hypertransport host bridge. Locate the host bridge... 2697 * a non-Hypertransport host bridge. Locate the host bridge...
2701 */ 2698 */
2702 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 2699 host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
2700 PCI_DEVFN(0, 0));
2703 if (host_bridge == NULL) { 2701 if (host_bridge == NULL) {
2704 dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n"); 2702 pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
2705 return; 2703 return;
2706 } 2704 }
2707 2705
@@ -2770,7 +2768,7 @@ static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2770{ 2768{
2771 /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */ 2769 /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
2772 if (dev->revision < 0x18) { 2770 if (dev->revision < 0x18) {
2773 dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n"); 2771 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
2774 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; 2772 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2775 } 2773 }
2776} 2774}
@@ -2899,8 +2897,8 @@ static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
2899 pci_write_config_byte(dev, 0x8E, write_enable); 2897 pci_write_config_byte(dev, 0x8E, write_enable);
2900 pci_write_config_byte(dev, 0x8D, write_target); 2898 pci_write_config_byte(dev, 0x8D, write_target);
2901 2899
2902 dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n"); 2900 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
2903 dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); 2901 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
2904} 2902}
2905DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); 2903DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
2906DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); 2904DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
@@ -2935,7 +2933,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2935 pci_write_config_byte(dev, 0xe1, 0x32); 2933 pci_write_config_byte(dev, 0xe1, 0x32);
2936 pci_write_config_byte(dev, 0xfc, 0x00); 2934 pci_write_config_byte(dev, 0xfc, 0x00);
2937 2935
2938 dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n"); 2936 pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
2939 } 2937 }
2940 2938
2941 pci_read_config_byte(dev, 0xCB, &disable); 2939 pci_read_config_byte(dev, 0xCB, &disable);
@@ -2948,8 +2946,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2948 pci_write_config_byte(dev, 0xCB, disable | 0x02); 2946 pci_write_config_byte(dev, 0xCB, disable | 0x02);
2949 pci_write_config_byte(dev, 0xCA, write_enable); 2947 pci_write_config_byte(dev, 0xCA, write_enable);
2950 2948
2951 dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); 2949 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
2952 dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); 2950 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
2953 2951
2954} 2952}
2955DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2953DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
@@ -2990,7 +2988,7 @@ static void fixup_ti816x_class(struct pci_dev *dev)
2990 2988
2991 /* TI 816x devices do not have class code set when in PCIe boot mode */ 2989 /* TI 816x devices do not have class code set when in PCIe boot mode */
2992 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8; 2990 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
2993 dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n", 2991 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
2994 class, dev->class); 2992 class, dev->class);
2995} 2993}
2996DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800, 2994DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
@@ -3032,7 +3030,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
3032 */ 3030 */
3033 err = pci_read_config_word(dev, 0x48, &rcc); 3031 err = pci_read_config_word(dev, 0x48, &rcc);
3034 if (err) { 3032 if (err) {
3035 dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n"); 3033 pci_err(dev, "Error attempting to read the read completion coalescing register\n");
3036 return; 3034 return;
3037 } 3035 }
3038 3036
@@ -3043,7 +3041,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
3043 3041
3044 err = pci_write_config_word(dev, 0x48, rcc); 3042 err = pci_write_config_word(dev, 0x48, rcc);
3045 if (err) { 3043 if (err) {
3046 dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n"); 3044 pci_err(dev, "Error attempting to write the read completion coalescing register\n");
3047 return; 3045 return;
3048 } 3046 }
3049 3047
@@ -3108,7 +3106,7 @@ static ktime_t fixup_debug_start(struct pci_dev *dev,
3108{ 3106{
3109 ktime_t calltime = 0; 3107 ktime_t calltime = 0;
3110 3108
3111 dev_dbg(&dev->dev, "calling %pF\n", fn); 3109 pci_dbg(dev, "calling %pF\n", fn);
3112 if (initcall_debug) { 3110 if (initcall_debug) {
3113 pr_debug("calling %pF @ %i for %s\n", 3111 pr_debug("calling %pF @ %i for %s\n",
3114 fn, task_pid_nr(current), dev_name(&dev->dev)); 3112 fn, task_pid_nr(current), dev_name(&dev->dev));
@@ -3150,13 +3148,13 @@ static void disable_igfx_irq(struct pci_dev *dev)
3150{ 3148{
3151 void __iomem *regs = pci_iomap(dev, 0, 0); 3149 void __iomem *regs = pci_iomap(dev, 0, 0);
3152 if (regs == NULL) { 3150 if (regs == NULL) {
3153 dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n"); 3151 pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
3154 return; 3152 return;
3155 } 3153 }
3156 3154
3157 /* Check if any interrupt line is still enabled */ 3155 /* Check if any interrupt line is still enabled */
3158 if (readl(regs + I915_DEIER_REG) != 0) { 3156 if (readl(regs + I915_DEIER_REG) != 0) {
3159 dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n"); 3157 pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
3160 3158
3161 writel(0, regs + I915_DEIER_REG); 3159 writel(0, regs + I915_DEIER_REG);
3162 } 3160 }
@@ -3215,6 +3213,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
3215 quirk_broken_intx_masking); 3213 quirk_broken_intx_masking);
3216DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ 3214DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
3217 quirk_broken_intx_masking); 3215 quirk_broken_intx_masking);
3216DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
3217 quirk_broken_intx_masking);
3218 3218
3219/* 3219/*
3220 * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) 3220 * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
@@ -3317,13 +3317,13 @@ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
3317 3317
3318 /* For ConnectX-4 and ConnectX-4LX, need to check FW support */ 3318 /* For ConnectX-4 and ConnectX-4LX, need to check FW support */
3319 if (pci_enable_device_mem(pdev)) { 3319 if (pci_enable_device_mem(pdev)) {
3320 dev_warn(&pdev->dev, "Can't enable device memory\n"); 3320 pci_warn(pdev, "Can't enable device memory\n");
3321 return; 3321 return;
3322 } 3322 }
3323 3323
3324 fw_ver = ioremap(pci_resource_start(pdev, 0), 4); 3324 fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
3325 if (!fw_ver) { 3325 if (!fw_ver) {
3326 dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n"); 3326 pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
3327 goto out; 3327 goto out;
3328 } 3328 }
3329 3329
@@ -3335,7 +3335,7 @@ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
3335 fw_subminor = fw_sub_min & 0xffff; 3335 fw_subminor = fw_sub_min & 0xffff;
3336 if (fw_minor > CONNECTX_4_CURR_MAX_MINOR || 3336 if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
3337 fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) { 3337 fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
3338 dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n", 3338 pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
3339 fw_major, fw_minor, fw_subminor, pdev->device == 3339 fw_major, fw_minor, fw_subminor, pdev->device ==
3340 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14); 3340 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
3341 pdev->broken_intx_masking = 1; 3341 pdev->broken_intx_masking = 1;
@@ -3473,7 +3473,7 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
3473 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP)) 3473 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
3474 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV))) 3474 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
3475 return; 3475 return;
3476 dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n"); 3476 pci_info(dev, "quirk: cutting power to thunderbolt controller...\n");
3477 3477
3478 /* magic sequence */ 3478 /* magic sequence */
3479 acpi_execute_simple_method(SXIO, NULL, 1); 3479 acpi_execute_simple_method(SXIO, NULL, 1);
@@ -3524,7 +3524,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
3524 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) 3524 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
3525 || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8) 3525 || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
3526 goto out; 3526 goto out;
3527 dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); 3527 pci_info(dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
3528 device_pm_wait_for_dev(&dev->dev, &nhi->dev); 3528 device_pm_wait_for_dev(&dev->dev, &nhi->dev);
3529out: 3529out:
3530 pci_dev_put(nhi); 3530 pci_dev_put(nhi);
@@ -3740,7 +3740,7 @@ static int reset_ivb_igd(struct pci_dev *dev, int probe)
3740 goto reset_complete; 3740 goto reset_complete;
3741 msleep(10); 3741 msleep(10);
3742 } while (time_before(jiffies, timeout)); 3742 } while (time_before(jiffies, timeout));
3743 dev_warn(&dev->dev, "timeout during reset\n"); 3743 pci_warn(dev, "timeout during reset\n");
3744 3744
3745reset_complete: 3745reset_complete:
3746 iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE); 3746 iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
@@ -3879,6 +3879,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
3879 quirk_dma_func1_alias); 3879 quirk_dma_func1_alias);
3880DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, 3880DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3881 quirk_dma_func1_alias); 3881 quirk_dma_func1_alias);
3882DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
3883 quirk_dma_func1_alias);
3882/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ 3884/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
3883DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, 3885DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
3884 quirk_dma_func1_alias); 3886 quirk_dma_func1_alias);
@@ -4012,7 +4014,7 @@ static void quirk_tw686x_class(struct pci_dev *pdev)
4012 4014
4013 /* Use "Multimedia controller" class */ 4015 /* Use "Multimedia controller" class */
4014 pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01; 4016 pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
4015 dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n", 4017 pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
4016 class, pdev->class); 4018 class, pdev->class);
4017} 4019}
4018DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8, 4020DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
@@ -4032,7 +4034,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
4032static void quirk_relaxedordering_disable(struct pci_dev *dev) 4034static void quirk_relaxedordering_disable(struct pci_dev *dev)
4033{ 4035{
4034 dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING; 4036 dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
4035 dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n"); 4037 pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
4036} 4038}
4037 4039
4038/* 4040/*
@@ -4141,11 +4143,11 @@ static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
4141 struct pci_dev *root_port = pci_find_pcie_root_port(pdev); 4143 struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
4142 4144
4143 if (!root_port) { 4145 if (!root_port) {
4144 dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n"); 4146 pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
4145 return; 4147 return;
4146 } 4148 }
4147 4149
4148 dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n", 4150 pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
4149 dev_name(&pdev->dev)); 4151 dev_name(&pdev->dev));
4150 pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL, 4152 pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
4151 PCI_EXP_DEVCTL_RELAX_EN | 4153 PCI_EXP_DEVCTL_RELAX_EN |
@@ -4339,7 +4341,7 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4339 u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); 4341 u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
4340 int ret = acs_flags & ~flags ? 0 : 1; 4342 int ret = acs_flags & ~flags ? 0 : 1;
4341 4343
4342 dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret); 4344 pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
4343 4345
4344 return ret; 4346 return ret;
4345} 4347}
@@ -4591,7 +4593,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
4591 if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) { 4593 if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
4592 updcr = readl(rcba_mem + INTEL_UPDCR_REG); 4594 updcr = readl(rcba_mem + INTEL_UPDCR_REG);
4593 if (updcr & INTEL_UPDCR_REG_MASK) { 4595 if (updcr & INTEL_UPDCR_REG_MASK) {
4594 dev_info(&dev->dev, "Disabling UPDCR peer decodes\n"); 4596 pci_info(dev, "Disabling UPDCR peer decodes\n");
4595 updcr &= ~INTEL_UPDCR_REG_MASK; 4597 updcr &= ~INTEL_UPDCR_REG_MASK;
4596 writel(updcr, rcba_mem + INTEL_UPDCR_REG); 4598 writel(updcr, rcba_mem + INTEL_UPDCR_REG);
4597 } 4599 }
@@ -4618,7 +4620,7 @@ static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4618 */ 4620 */
4619 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc); 4621 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4620 if (!(mpc & INTEL_MPC_REG_IRBNCE)) { 4622 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4621 dev_info(&dev->dev, "Enabling MPC IRBNCE\n"); 4623 pci_info(dev, "Enabling MPC IRBNCE\n");
4622 mpc |= INTEL_MPC_REG_IRBNCE; 4624 mpc |= INTEL_MPC_REG_IRBNCE;
4623 pci_write_config_word(dev, INTEL_MPC_REG, mpc); 4625 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
4624 } 4626 }
@@ -4630,7 +4632,7 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
4630 return -ENOTTY; 4632 return -ENOTTY;
4631 4633
4632 if (pci_quirk_enable_intel_lpc_acs(dev)) { 4634 if (pci_quirk_enable_intel_lpc_acs(dev)) {
4633 dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n"); 4635 pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
4634 return 0; 4636 return 0;
4635 } 4637 }
4636 4638
@@ -4638,7 +4640,7 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
4638 4640
4639 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK; 4641 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
4640 4642
4641 dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n"); 4643 pci_info(dev, "Intel PCH root port ACS workaround enabled\n");
4642 4644
4643 return 0; 4645 return 0;
4644} 4646}
@@ -4665,7 +4667,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
4665 4667
4666 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); 4668 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4667 4669
4668 dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n"); 4670 pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
4669 4671
4670 return 0; 4672 return 0;
4671} 4673}
@@ -4800,7 +4802,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
4800 return; 4802 return;
4801 4803
4802 bridge->no_ext_tags = 1; 4804 bridge->no_ext_tags = 1;
4803 dev_info(&pdev->dev, "disabling Extended Tags (this device can't handle them)\n"); 4805 pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");
4804 4806
4805 pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); 4807 pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
4806} 4808}
@@ -4815,7 +4817,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
4815 */ 4817 */
4816static void quirk_no_ats(struct pci_dev *pdev) 4818static void quirk_no_ats(struct pci_dev *pdev)
4817{ 4819{
4818 dev_info(&pdev->dev, "disabling ATS (broken on this device)\n"); 4820 pci_info(pdev, "disabling ATS (broken on this device)\n");
4819 pdev->ats_cap = 0; 4821 pdev->ats_cap = 0;
4820} 4822}
4821 4823
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 9ba3fa841eb0..374a33443be9 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -93,15 +93,15 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
93 void __iomem *pds; 93 void __iomem *pds;
94 /* Standard PCI ROMs start out with these bytes 55 AA */ 94 /* Standard PCI ROMs start out with these bytes 55 AA */
95 if (readw(image) != 0xAA55) { 95 if (readw(image) != 0xAA55) {
96 dev_err(&pdev->dev, "Invalid PCI ROM header signature: expecting 0xaa55, got %#06x\n", 96 pci_info(pdev, "Invalid PCI ROM header signature: expecting 0xaa55, got %#06x\n",
97 readw(image)); 97 readw(image));
98 break; 98 break;
99 } 99 }
100 /* get the PCI data structure and check its "PCIR" signature */ 100 /* get the PCI data structure and check its "PCIR" signature */
101 pds = image + readw(image + 24); 101 pds = image + readw(image + 24);
102 if (readl(pds) != 0x52494350) { 102 if (readl(pds) != 0x52494350) {
103 dev_err(&pdev->dev, "Invalid PCI ROM data signature: expecting 0x52494350, got %#010x\n", 103 pci_info(pdev, "Invalid PCI ROM data signature: expecting 0x52494350, got %#010x\n",
104 readl(pds)); 104 readl(pds));
105 break; 105 break;
106 } 106 }
107 last_image = readb(pds + 21) & 0x80; 107 last_image = readb(pds + 21) & 0x80;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0a26648f1712..3cce29a069e6 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -68,10 +68,8 @@ static int add_to_list(struct list_head *head,
68 struct pci_dev_resource *tmp; 68 struct pci_dev_resource *tmp;
69 69
70 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 70 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
71 if (!tmp) { 71 if (!tmp)
72 pr_warn("add_to_list: kmalloc() failed!\n");
73 return -ENOMEM; 72 return -ENOMEM;
74 }
75 73
76 tmp->res = res; 74 tmp->res = res;
77 tmp->dev = dev; 75 tmp->dev = dev;
@@ -153,7 +151,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
153 151
154 r_align = pci_resource_alignment(dev, r); 152 r_align = pci_resource_alignment(dev, r);
155 if (!r_align) { 153 if (!r_align) {
156 dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n", 154 pci_warn(dev, "BAR %d: %pR has bogus alignment\n",
157 i, r); 155 i, r);
158 continue; 156 continue;
159 } 157 }
@@ -261,7 +259,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
261 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); 259 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
262 if (pci_reassign_resource(add_res->dev, idx, 260 if (pci_reassign_resource(add_res->dev, idx,
263 add_size, align)) 261 add_size, align))
264 dev_printk(KERN_DEBUG, &add_res->dev->dev, 262 pci_printk(KERN_DEBUG, add_res->dev,
265 "failed to add %llx res[%d]=%pR\n", 263 "failed to add %llx res[%d]=%pR\n",
266 (unsigned long long)add_size, 264 (unsigned long long)add_size,
267 idx, res); 265 idx, res);
@@ -520,7 +518,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
520 struct resource *res; 518 struct resource *res;
521 struct pci_bus_region region; 519 struct pci_bus_region region;
522 520
523 dev_info(&bridge->dev, "CardBus bridge to %pR\n", 521 pci_info(bridge, "CardBus bridge to %pR\n",
524 &bus->busn_res); 522 &bus->busn_res);
525 523
526 res = bus->resource[0]; 524 res = bus->resource[0];
@@ -530,7 +528,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
530 * The IO resource is allocated a range twice as large as it 528 * The IO resource is allocated a range twice as large as it
531 * would normally need. This allows us to set both IO regs. 529 * would normally need. This allows us to set both IO regs.
532 */ 530 */
533 dev_info(&bridge->dev, " bridge window %pR\n", res); 531 pci_info(bridge, " bridge window %pR\n", res);
534 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, 532 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
535 region.start); 533 region.start);
536 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0, 534 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
@@ -540,7 +538,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
540 res = bus->resource[1]; 538 res = bus->resource[1];
541 pcibios_resource_to_bus(bridge->bus, &region, res); 539 pcibios_resource_to_bus(bridge->bus, &region, res);
542 if (res->flags & IORESOURCE_IO) { 540 if (res->flags & IORESOURCE_IO) {
543 dev_info(&bridge->dev, " bridge window %pR\n", res); 541 pci_info(bridge, " bridge window %pR\n", res);
544 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, 542 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
545 region.start); 543 region.start);
546 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1, 544 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
@@ -550,7 +548,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
550 res = bus->resource[2]; 548 res = bus->resource[2];
551 pcibios_resource_to_bus(bridge->bus, &region, res); 549 pcibios_resource_to_bus(bridge->bus, &region, res);
552 if (res->flags & IORESOURCE_MEM) { 550 if (res->flags & IORESOURCE_MEM) {
553 dev_info(&bridge->dev, " bridge window %pR\n", res); 551 pci_info(bridge, " bridge window %pR\n", res);
554 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, 552 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
555 region.start); 553 region.start);
556 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0, 554 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
@@ -560,7 +558,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
560 res = bus->resource[3]; 558 res = bus->resource[3];
561 pcibios_resource_to_bus(bridge->bus, &region, res); 559 pcibios_resource_to_bus(bridge->bus, &region, res);
562 if (res->flags & IORESOURCE_MEM) { 560 if (res->flags & IORESOURCE_MEM) {
563 dev_info(&bridge->dev, " bridge window %pR\n", res); 561 pci_info(bridge, " bridge window %pR\n", res);
564 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, 562 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
565 region.start); 563 region.start);
566 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1, 564 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
@@ -603,7 +601,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
603 l = ((u16) io_limit_lo << 8) | io_base_lo; 601 l = ((u16) io_limit_lo << 8) | io_base_lo;
604 /* Set up upper 16 bits of I/O base/limit. */ 602 /* Set up upper 16 bits of I/O base/limit. */
605 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 603 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
606 dev_info(&bridge->dev, " bridge window %pR\n", res); 604 pci_info(bridge, " bridge window %pR\n", res);
607 } else { 605 } else {
608 /* Clear upper 16 bits of I/O base/limit. */ 606 /* Clear upper 16 bits of I/O base/limit. */
609 io_upper16 = 0; 607 io_upper16 = 0;
@@ -629,7 +627,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
629 if (res->flags & IORESOURCE_MEM) { 627 if (res->flags & IORESOURCE_MEM) {
630 l = (region.start >> 16) & 0xfff0; 628 l = (region.start >> 16) & 0xfff0;
631 l |= region.end & 0xfff00000; 629 l |= region.end & 0xfff00000;
632 dev_info(&bridge->dev, " bridge window %pR\n", res); 630 pci_info(bridge, " bridge window %pR\n", res);
633 } else { 631 } else {
634 l = 0x0000fff0; 632 l = 0x0000fff0;
635 } 633 }
@@ -658,7 +656,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
658 bu = upper_32_bits(region.start); 656 bu = upper_32_bits(region.start);
659 lu = upper_32_bits(region.end); 657 lu = upper_32_bits(region.end);
660 } 658 }
661 dev_info(&bridge->dev, " bridge window %pR\n", res); 659 pci_info(bridge, " bridge window %pR\n", res);
662 } else { 660 } else {
663 l = 0x0000fff0; 661 l = 0x0000fff0;
664 } 662 }
@@ -673,7 +671,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
673{ 671{
674 struct pci_dev *bridge = bus->self; 672 struct pci_dev *bridge = bus->self;
675 673
676 dev_info(&bridge->dev, "PCI bridge to %pR\n", 674 pci_info(bridge, "PCI bridge to %pR\n",
677 &bus->busn_res); 675 &bus->busn_res);
678 676
679 if (type & IORESOURCE_IO) 677 if (type & IORESOURCE_IO)
@@ -945,7 +943,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
945 resource_size(b_res), min_align); 943 resource_size(b_res), min_align);
946 if (!size0 && !size1) { 944 if (!size0 && !size1) {
947 if (b_res->start || b_res->end) 945 if (b_res->start || b_res->end)
948 dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n", 946 pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
949 b_res, &bus->busn_res); 947 b_res, &bus->busn_res);
950 b_res->flags = 0; 948 b_res->flags = 0;
951 return; 949 return;
@@ -957,7 +955,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
957 if (size1 > size0 && realloc_head) { 955 if (size1 > size0 && realloc_head) {
958 add_to_list(realloc_head, bus->self, b_res, size1-size0, 956 add_to_list(realloc_head, bus->self, b_res, size1-size0,
959 min_align); 957 min_align);
960 dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n", 958 pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx\n",
961 b_res, &bus->busn_res, 959 b_res, &bus->busn_res,
962 (unsigned long long)size1-size0); 960 (unsigned long long)size1-size0);
963 } 961 }
@@ -1062,7 +1060,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
1062 if (order < 0) 1060 if (order < 0)
1063 order = 0; 1061 order = 0;
1064 if (order >= ARRAY_SIZE(aligns)) { 1062 if (order >= ARRAY_SIZE(aligns)) {
1065 dev_warn(&dev->dev, "disabling BAR %d: %pR (bad alignment %#llx)\n", 1063 pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
1066 i, r, (unsigned long long) align); 1064 i, r, (unsigned long long) align);
1067 r->flags = 0; 1065 r->flags = 0;
1068 continue; 1066 continue;
@@ -1094,7 +1092,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
1094 resource_size(b_res), add_align); 1092 resource_size(b_res), add_align);
1095 if (!size0 && !size1) { 1093 if (!size0 && !size1) {
1096 if (b_res->start || b_res->end) 1094 if (b_res->start || b_res->end)
1097 dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n", 1095 pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
1098 b_res, &bus->busn_res); 1096 b_res, &bus->busn_res);
1099 b_res->flags = 0; 1097 b_res->flags = 0;
1100 return 0; 1098 return 0;
@@ -1104,7 +1102,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
1104 b_res->flags |= IORESOURCE_STARTALIGN; 1102 b_res->flags |= IORESOURCE_STARTALIGN;
1105 if (size1 > size0 && realloc_head) { 1103 if (size1 > size0 && realloc_head) {
1106 add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); 1104 add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
1107 dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx add_align %llx\n", 1105 pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
1108 b_res, &bus->busn_res, 1106 b_res, &bus->busn_res,
1109 (unsigned long long) (size1 - size0), 1107 (unsigned long long) (size1 - size0),
1110 (unsigned long long) add_align); 1108 (unsigned long long) add_align);
@@ -1408,7 +1406,7 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
1408 break; 1406 break;
1409 1407
1410 default: 1408 default:
1411 dev_info(&dev->dev, "not setting up bridge for bus %04x:%02x\n", 1409 pci_info(dev, "not setting up bridge for bus %04x:%02x\n",
1412 pci_domain_nr(b), b->number); 1410 pci_domain_nr(b), b->number);
1413 break; 1411 break;
1414 } 1412 }
@@ -1514,7 +1512,7 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
1514 break; 1512 break;
1515 1513
1516 default: 1514 default:
1517 dev_info(&bridge->dev, "not setting up bridge for bus %04x:%02x\n", 1515 pci_info(bridge, "not setting up bridge for bus %04x:%02x\n",
1518 pci_domain_nr(b), b->number); 1516 pci_domain_nr(b), b->number);
1519 break; 1517 break;
1520 } 1518 }
@@ -1572,7 +1570,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
1572 release_child_resources(r); 1570 release_child_resources(r);
1573 if (!release_resource(r)) { 1571 if (!release_resource(r)) {
1574 type = old_flags = r->flags & PCI_RES_TYPE_MASK; 1572 type = old_flags = r->flags & PCI_RES_TYPE_MASK;
1575 dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n", 1573 pci_printk(KERN_DEBUG, dev, "resource %d %pR released\n",
1576 PCI_BRIDGE_RESOURCES + idx, r); 1574 PCI_BRIDGE_RESOURCES + idx, r);
1577 /* keep the old size */ 1575 /* keep the old size */
1578 r->end = resource_size(r) - 1; 1576 r->end = resource_size(r) - 1;
@@ -1875,7 +1873,7 @@ static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
1875 return; 1873 return;
1876 1874
1877 dev_res->add_size = available - resource_size(res); 1875 dev_res->add_size = available - resource_size(res);
1878 dev_dbg(&bridge->dev, "bridge window %pR extended by %pa\n", res, 1876 pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
1879 &dev_res->add_size); 1877 &dev_res->add_size);
1880} 1878}
1881 1879
@@ -2086,7 +2084,7 @@ again:
2086enable_all: 2084enable_all:
2087 retval = pci_reenable_device(bridge); 2085 retval = pci_reenable_device(bridge);
2088 if (retval) 2086 if (retval)
2089 dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval); 2087 pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
2090 pci_set_master(bridge); 2088 pci_set_master(bridge);
2091} 2089}
2092EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); 2090EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
@@ -2120,7 +2118,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
2120 if (ret) 2118 if (ret)
2121 goto cleanup; 2119 goto cleanup;
2122 2120
2123 dev_info(&bridge->dev, "BAR %d: releasing %pR\n", 2121 pci_info(bridge, "BAR %d: releasing %pR\n",
2124 i, res); 2122 i, res);
2125 2123
2126 if (res->parent) 2124 if (res->parent)
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 9c8d81b3cf82..5ad4ee7d7b1e 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -26,7 +26,7 @@ void pci_assign_irq(struct pci_dev *dev)
26 struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus); 26 struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
27 27
28 if (!(hbrg->map_irq)) { 28 if (!(hbrg->map_irq)) {
29 dev_dbg(&dev->dev, "runtime IRQ mapping not provided by arch\n"); 29 pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
30 return; 30 return;
31 } 31 }
32 32
@@ -56,7 +56,7 @@ void pci_assign_irq(struct pci_dev *dev)
56 } 56 }
57 dev->irq = irq; 57 dev->irq = irq;
58 58
59 dev_dbg(&dev->dev, "assign IRQ: got %d\n", dev->irq); 59 pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
60 60
61 /* Always tell the device, so the driver knows what is 61 /* Always tell the device, so the driver knows what is
62 the real IRQ to use; the device does not use it. */ 62 the real IRQ to use; the device does not use it. */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index e815111f3f81..369d48d6c6f1 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -103,7 +103,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
103 pci_read_config_dword(dev, reg, &check); 103 pci_read_config_dword(dev, reg, &check);
104 104
105 if ((new ^ check) & mask) { 105 if ((new ^ check) & mask) {
106 dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n", 106 pci_err(dev, "BAR %d: error updating (%#08x != %#08x)\n",
107 resno, new, check); 107 resno, new, check);
108 } 108 }
109 109
@@ -112,7 +112,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
112 pci_write_config_dword(dev, reg + 4, new); 112 pci_write_config_dword(dev, reg + 4, new);
113 pci_read_config_dword(dev, reg + 4, &check); 113 pci_read_config_dword(dev, reg + 4, &check);
114 if (check != new) { 114 if (check != new) {
115 dev_err(&dev->dev, "BAR %d: error updating (high %#08x != %#08x)\n", 115 pci_err(dev, "BAR %d: error updating (high %#08x != %#08x)\n",
116 resno, new, check); 116 resno, new, check);
117 } 117 }
118 } 118 }
@@ -137,7 +137,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
137 struct resource *root, *conflict; 137 struct resource *root, *conflict;
138 138
139 if (res->flags & IORESOURCE_UNSET) { 139 if (res->flags & IORESOURCE_UNSET) {
140 dev_info(&dev->dev, "can't claim BAR %d %pR: no address assigned\n", 140 pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
141 resource, res); 141 resource, res);
142 return -EINVAL; 142 return -EINVAL;
143 } 143 }
@@ -152,7 +152,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
152 152
153 root = pci_find_parent_resource(dev, res); 153 root = pci_find_parent_resource(dev, res);
154 if (!root) { 154 if (!root) {
155 dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", 155 pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
156 resource, res); 156 resource, res);
157 res->flags |= IORESOURCE_UNSET; 157 res->flags |= IORESOURCE_UNSET;
158 return -EINVAL; 158 return -EINVAL;
@@ -160,7 +160,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
160 160
161 conflict = request_resource_conflict(root, res); 161 conflict = request_resource_conflict(root, res);
162 if (conflict) { 162 if (conflict) {
163 dev_info(&dev->dev, "can't claim BAR %d %pR: address conflict with %s %pR\n", 163 pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
164 resource, res, conflict->name, conflict); 164 resource, res, conflict->name, conflict);
165 res->flags |= IORESOURCE_UNSET; 165 res->flags |= IORESOURCE_UNSET;
166 return -EBUSY; 166 return -EBUSY;
@@ -172,7 +172,7 @@ EXPORT_SYMBOL(pci_claim_resource);
172 172
173void pci_disable_bridge_window(struct pci_dev *dev) 173void pci_disable_bridge_window(struct pci_dev *dev)
174{ 174{
175 dev_info(&dev->dev, "disabling bridge mem windows\n"); 175 pci_info(dev, "disabling bridge mem windows\n");
176 176
177 /* MMIO Base/Limit */ 177 /* MMIO Base/Limit */
178 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0); 178 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
@@ -221,11 +221,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
221 root = &iomem_resource; 221 root = &iomem_resource;
222 } 222 }
223 223
224 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", 224 pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
225 resno, res); 225 resno, res);
226 conflict = request_resource_conflict(root, res); 226 conflict = request_resource_conflict(root, res);
227 if (conflict) { 227 if (conflict) {
228 dev_info(&dev->dev, "BAR %d: %pR conflicts with %s %pR\n", 228 pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
229 resno, res, conflict->name, conflict); 229 resno, res, conflict->name, conflict);
230 res->start = start; 230 res->start = start;
231 res->end = end; 231 res->end = end;
@@ -324,7 +324,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
324 res->flags |= IORESOURCE_UNSET; 324 res->flags |= IORESOURCE_UNSET;
325 align = pci_resource_alignment(dev, res); 325 align = pci_resource_alignment(dev, res);
326 if (!align) { 326 if (!align) {
327 dev_info(&dev->dev, "BAR %d: can't assign %pR (bogus alignment)\n", 327 pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
328 resno, res); 328 resno, res);
329 return -EINVAL; 329 return -EINVAL;
330 } 330 }
@@ -338,19 +338,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
338 * working, which is better than just leaving it disabled. 338 * working, which is better than just leaving it disabled.
339 */ 339 */
340 if (ret < 0) { 340 if (ret < 0) {
341 dev_info(&dev->dev, "BAR %d: no space for %pR\n", resno, res); 341 pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
342 ret = pci_revert_fw_address(res, dev, resno, size); 342 ret = pci_revert_fw_address(res, dev, resno, size);
343 } 343 }
344 344
345 if (ret < 0) { 345 if (ret < 0) {
346 dev_info(&dev->dev, "BAR %d: failed to assign %pR\n", resno, 346 pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
347 res);
348 return ret; 347 return ret;
349 } 348 }
350 349
351 res->flags &= ~IORESOURCE_UNSET; 350 res->flags &= ~IORESOURCE_UNSET;
352 res->flags &= ~IORESOURCE_STARTALIGN; 351 res->flags &= ~IORESOURCE_STARTALIGN;
353 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); 352 pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
354 if (resno < PCI_BRIDGE_RESOURCES) 353 if (resno < PCI_BRIDGE_RESOURCES)
355 pci_update_resource(dev, resno); 354 pci_update_resource(dev, resno);
356 355
@@ -372,7 +371,7 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
372 flags = res->flags; 371 flags = res->flags;
373 res->flags |= IORESOURCE_UNSET; 372 res->flags |= IORESOURCE_UNSET;
374 if (!res->parent) { 373 if (!res->parent) {
375 dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n", 374 pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
376 resno, res); 375 resno, res);
377 return -EINVAL; 376 return -EINVAL;
378 } 377 }
@@ -382,14 +381,14 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
382 ret = _pci_assign_resource(dev, resno, new_size, min_align); 381 ret = _pci_assign_resource(dev, resno, new_size, min_align);
383 if (ret) { 382 if (ret) {
384 res->flags = flags; 383 res->flags = flags;
385 dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n", 384 pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
386 resno, res, (unsigned long long) addsize); 385 resno, res, (unsigned long long) addsize);
387 return ret; 386 return ret;
388 } 387 }
389 388
390 res->flags &= ~IORESOURCE_UNSET; 389 res->flags &= ~IORESOURCE_UNSET;
391 res->flags &= ~IORESOURCE_STARTALIGN; 390 res->flags &= ~IORESOURCE_STARTALIGN;
392 dev_info(&dev->dev, "BAR %d: reassigned %pR (expanded by %#llx)\n", 391 pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
393 resno, res, (unsigned long long) addsize); 392 resno, res, (unsigned long long) addsize);
394 if (resno < PCI_BRIDGE_RESOURCES) 393 if (resno < PCI_BRIDGE_RESOURCES)
395 pci_update_resource(dev, resno); 394 pci_update_resource(dev, resno);
@@ -401,7 +400,7 @@ void pci_release_resource(struct pci_dev *dev, int resno)
401{ 400{
402 struct resource *res = dev->resource + resno; 401 struct resource *res = dev->resource + resno;
403 402
404 dev_info(&dev->dev, "BAR %d: releasing %pR\n", resno, res); 403 pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
405 release_resource(res); 404 release_resource(res);
406 res->end = resource_size(res) - 1; 405 res->end = resource_size(res) - 1;
407 res->start = 0; 406 res->start = 0;
@@ -477,13 +476,13 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
477 continue; 476 continue;
478 477
479 if (r->flags & IORESOURCE_UNSET) { 478 if (r->flags & IORESOURCE_UNSET) {
480 dev_err(&dev->dev, "can't enable device: BAR %d %pR not assigned\n", 479 pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
481 i, r); 480 i, r);
482 return -EINVAL; 481 return -EINVAL;
483 } 482 }
484 483
485 if (!r->parent) { 484 if (!r->parent) {
486 dev_err(&dev->dev, "can't enable device: BAR %d %pR not claimed\n", 485 pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
487 i, r); 486 i, r);
488 return -EINVAL; 487 return -EINVAL;
489 } 488 }
@@ -495,8 +494,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
495 } 494 }
496 495
497 if (cmd != old_cmd) { 496 if (cmd != old_cmd) {
498 dev_info(&dev->dev, "enabling device (%04x -> %04x)\n", 497 pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
499 old_cmd, cmd);
500 pci_write_config_word(dev, PCI_COMMAND, cmd); 498 pci_write_config_word(dev, PCI_COMMAND, cmd);
501 } 499 }
502 return 0; 500 return 0;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 5c4c459cf092..4096c42771a2 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -698,6 +698,7 @@ static const struct event_reg {
698 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC, 698 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
699 cli_mrpc_comp_async_hdr), 699 cli_mrpc_comp_async_hdr),
700 EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr), 700 EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
701 EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
701 EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr), 702 EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
702 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr), 703 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
703 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr), 704 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
@@ -1343,6 +1344,8 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
1343 SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3 1344 SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
1344 SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3 1345 SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
1345 SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3 1346 SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
1347 SWITCHTEC_PCI_DEVICE(0x8541), //PSX 24xG3
1348 SWITCHTEC_PCI_DEVICE(0x8542), //PSX 32xG3
1346 SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3 1349 SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
1347 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 1350 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
1348 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 1351 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 83efa001c2e7..e725f99b5479 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -28,7 +28,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
28 return -EPERM; 28 return -EPERM;
29 29
30 err = -ENODEV; 30 err = -ENODEV;
31 dev = pci_get_bus_and_slot(bus, dfn); 31 dev = pci_get_domain_bus_and_slot(0, bus, dfn);
32 if (!dev) 32 if (!dev)
33 goto error; 33 goto error;
34 34
@@ -96,7 +96,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
96 if (!capable(CAP_SYS_ADMIN)) 96 if (!capable(CAP_SYS_ADMIN))
97 return -EPERM; 97 return -EPERM;
98 98
99 dev = pci_get_bus_and_slot(bus, dfn); 99 dev = pci_get_domain_bus_and_slot(0, bus, dfn);
100 if (!dev) 100 if (!dev)
101 return -ENODEV; 101 return -ENODEV;
102 102
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index 825c4bddb5fd..5acd9c02683a 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -54,7 +54,7 @@ static void pci_vc_load_arb_table(struct pci_dev *dev, int pos)
54 PCI_VC_PORT_STATUS_TABLE)) 54 PCI_VC_PORT_STATUS_TABLE))
55 return; 55 return;
56 56
57 dev_err(&dev->dev, "VC arbitration table failed to load\n"); 57 pci_err(dev, "VC arbitration table failed to load\n");
58} 58}
59 59
60/** 60/**
@@ -82,7 +82,7 @@ static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res)
82 if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE)) 82 if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE))
83 return; 83 return;
84 84
85 dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res); 85 pci_err(dev, "VC%d port arbitration table failed to load\n", res);
86} 86}
87 87
88/** 88/**
@@ -158,11 +158,11 @@ enable:
158 pci_write_config_dword(dev, ctrl_pos, ctrl); 158 pci_write_config_dword(dev, ctrl_pos, ctrl);
159 159
160 if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO)) 160 if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO))
161 dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id); 161 pci_err(dev, "VC%d negotiation stuck pending\n", id);
162 162
163 if (link && !pci_wait_for_pending(link, status_pos2, 163 if (link && !pci_wait_for_pending(link, status_pos2,
164 PCI_VC_RES_STATUS_NEGO)) 164 PCI_VC_RES_STATUS_NEGO))
165 dev_err(&link->dev, "VC%d negotiation stuck pending\n", id); 165 pci_err(link, "VC%d negotiation stuck pending\n", id);
166} 166}
167 167
168/** 168/**
@@ -192,8 +192,7 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
192 /* Sanity check buffer size for save/restore */ 192 /* Sanity check buffer size for save/restore */
193 if (buf && save_state->cap.size != 193 if (buf && save_state->cap.size !=
194 pci_vc_do_save_buffer(dev, pos, NULL, save)) { 194 pci_vc_do_save_buffer(dev, pos, NULL, save)) {
195 dev_err(&dev->dev, 195 pci_err(dev, "VC save buffer size does not match @0x%x\n", pos);
196 "VC save buffer size does not match @0x%x\n", pos);
197 return -ENOMEM; 196 return -ENOMEM;
198 } 197 }
199 198
@@ -363,14 +362,14 @@ int pci_save_vc_state(struct pci_dev *dev)
363 362
364 save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id); 363 save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
365 if (!save_state) { 364 if (!save_state) {
366 dev_err(&dev->dev, "%s buffer not found in %s\n", 365 pci_err(dev, "%s buffer not found in %s\n",
367 vc_caps[i].name, __func__); 366 vc_caps[i].name, __func__);
368 return -ENOMEM; 367 return -ENOMEM;
369 } 368 }
370 369
371 ret = pci_vc_do_save_buffer(dev, pos, save_state, true); 370 ret = pci_vc_do_save_buffer(dev, pos, save_state, true);
372 if (ret) { 371 if (ret) {
373 dev_err(&dev->dev, "%s save unsuccessful %s\n", 372 pci_err(dev, "%s save unsuccessful %s\n",
374 vc_caps[i].name, __func__); 373 vc_caps[i].name, __func__);
375 return ret; 374 return ret;
376 } 375 }
@@ -423,8 +422,7 @@ void pci_allocate_vc_save_buffers(struct pci_dev *dev)
423 422
424 len = pci_vc_do_save_buffer(dev, pos, NULL, false); 423 len = pci_vc_do_save_buffer(dev, pos, NULL, false);
425 if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len)) 424 if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len))
426 dev_err(&dev->dev, 425 pci_err(dev, "unable to preallocate %s save buffer\n",
427 "unable to preallocate %s save buffer\n",
428 vc_caps[i].name); 426 vc_caps[i].name);
429 } 427 }
430} 428}
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 82fe8526ac90..8785014f656e 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -262,8 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
262 struct msi_desc *entry; 262 struct msi_desc *entry;
263 263
264 if (nvec > SH_INFO_MAX_VEC) { 264 if (nvec > SH_INFO_MAX_VEC) {
265 dev_err(&dev->dev, "too much vector for pci frontend: %x." 265 pci_err(dev, "too many vectors (0x%x) for PCI frontend:"
266 " Increase SH_INFO_MAX_VEC.\n", nvec); 266 " Increase SH_INFO_MAX_VEC\n", nvec);
267 return -EINVAL; 267 return -EINVAL;
268 } 268 }
269 269
@@ -282,7 +282,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
282 /* we get the result */ 282 /* we get the result */
283 for (i = 0; i < nvec; i++) { 283 for (i = 0; i < nvec; i++) {
284 if (op.msix_entries[i].vector <= 0) { 284 if (op.msix_entries[i].vector <= 0) {
285 dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n", 285 pci_warn(dev, "MSI-X entry %d is invalid: %d!\n",
286 i, op.msix_entries[i].vector); 286 i, op.msix_entries[i].vector);
287 err = -EINVAL; 287 err = -EINVAL;
288 vector[i] = -1; 288 vector[i] = -1;
@@ -296,7 +296,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
296 err = op.value; 296 err = op.value;
297 } 297 }
298 } else { 298 } else {
299 dev_err(&dev->dev, "enable msix get err %x\n", err); 299 pci_err(dev, "enable msix get err %x\n", err);
300 } 300 }
301 return err; 301 return err;
302} 302}
@@ -317,7 +317,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
317 317
318 /* What should do for error ? */ 318 /* What should do for error ? */
319 if (err) 319 if (err)
320 dev_err(&dev->dev, "pci_disable_msix get err %x\n", err); 320 pci_err(dev, "pci_disable_msix get err %x\n", err);
321} 321}
322 322
323static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[]) 323static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
@@ -336,13 +336,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
336 if (likely(!err)) { 336 if (likely(!err)) {
337 vector[0] = op.value; 337 vector[0] = op.value;
338 if (op.value <= 0) { 338 if (op.value <= 0) {
339 dev_warn(&dev->dev, "MSI entry is invalid: %d!\n", 339 pci_warn(dev, "MSI entry is invalid: %d!\n",
340 op.value); 340 op.value);
341 err = -EINVAL; 341 err = -EINVAL;
342 vector[0] = -1; 342 vector[0] = -1;
343 } 343 }
344 } else { 344 } else {
345 dev_err(&dev->dev, "pci frontend enable msi failed for dev " 345 pci_err(dev, "pci frontend enable msi failed for dev "
346 "%x:%x\n", op.bus, op.devfn); 346 "%x:%x\n", op.bus, op.devfn);
347 err = -EINVAL; 347 err = -EINVAL;
348 } 348 }
@@ -561,7 +561,7 @@ static void free_root_bus_devs(struct pci_bus *bus)
561 while (!list_empty(&bus->devices)) { 561 while (!list_empty(&bus->devices)) {
562 dev = container_of(bus->devices.next, struct pci_dev, 562 dev = container_of(bus->devices.next, struct pci_dev,
563 bus_list); 563 bus_list);
564 dev_dbg(&dev->dev, "removing device\n"); 564 pci_dbg(dev, "removing device\n");
565 pci_stop_and_remove_bus_device(dev); 565 pci_stop_and_remove_bus_device(dev);
566 } 566 }
567} 567}
@@ -596,6 +596,7 @@ static pci_ers_result_t pcifront_common_process(int cmd,
596 struct pci_driver *pdrv; 596 struct pci_driver *pdrv;
597 int bus = pdev->sh_info->aer_op.bus; 597 int bus = pdev->sh_info->aer_op.bus;
598 int devfn = pdev->sh_info->aer_op.devfn; 598 int devfn = pdev->sh_info->aer_op.devfn;
599 int domain = pdev->sh_info->aer_op.domain;
599 struct pci_dev *pcidev; 600 struct pci_dev *pcidev;
600 int flag = 0; 601 int flag = 0;
601 602
@@ -604,7 +605,7 @@ static pci_ers_result_t pcifront_common_process(int cmd,
604 cmd, bus, devfn); 605 cmd, bus, devfn);
605 result = PCI_ERS_RESULT_NONE; 606 result = PCI_ERS_RESULT_NONE;
606 607
607 pcidev = pci_get_bus_and_slot(bus, devfn); 608 pcidev = pci_get_domain_bus_and_slot(domain, bus, devfn);
608 if (!pcidev || !pcidev->driver) { 609 if (!pcidev || !pcidev->driver) {
609 dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); 610 dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
610 pci_dev_put(pcidev); 611 pci_dev_put(pcidev);
@@ -614,8 +615,7 @@ static pci_ers_result_t pcifront_common_process(int cmd,
614 615
615 if (pdrv) { 616 if (pdrv) {
616 if (pdrv->err_handler && pdrv->err_handler->error_detected) { 617 if (pdrv->err_handler && pdrv->err_handler->error_detected) {
617 dev_dbg(&pcidev->dev, 618 pci_dbg(pcidev, "trying to call AER service\n");
618 "trying to call AER service\n");
619 if (pcidev) { 619 if (pcidev) {
620 flag = 1; 620 flag = 1;
621 switch (cmd) { 621 switch (cmd) {
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 2c2e6a3b4c7e..7b31f19ade83 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -251,8 +251,9 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp
251 struct pci_dev *pdev; 251 struct pci_dev *pdev;
252 struct device_node *dp; 252 struct device_node *dp;
253 253
254 pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], 254 pdev = pci_get_domain_bus_and_slot(0,
255 ((int *) op->oprom_array)[1]); 255 ((int *) op->oprom_array)[0],
256 ((int *) op->oprom_array)[1]);
256 257
257 dp = pci_device_to_OF_node(pdev); 258 dp = pci_device_to_OF_node(pdev);
258 data->current_node = dp; 259 data->current_node = dp;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 445b1dc5d441..a17ba1465815 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -422,7 +422,10 @@ static const char *vgacon_startup(void)
422 vga_video_port_val = VGA_CRT_DM; 422 vga_video_port_val = VGA_CRT_DM;
423 if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { 423 if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
424 static struct resource ega_console_resource = 424 static struct resource ega_console_resource =
425 { .name = "ega", .start = 0x3B0, .end = 0x3BF }; 425 { .name = "ega",
426 .flags = IORESOURCE_IO,
427 .start = 0x3B0,
428 .end = 0x3BF };
426 vga_video_type = VIDEO_TYPE_EGAM; 429 vga_video_type = VIDEO_TYPE_EGAM;
427 vga_vram_size = 0x8000; 430 vga_vram_size = 0x8000;
428 display_desc = "EGA+"; 431 display_desc = "EGA+";
@@ -430,9 +433,15 @@ static const char *vgacon_startup(void)
430 &ega_console_resource); 433 &ega_console_resource);
431 } else { 434 } else {
432 static struct resource mda1_console_resource = 435 static struct resource mda1_console_resource =
433 { .name = "mda", .start = 0x3B0, .end = 0x3BB }; 436 { .name = "mda",
437 .flags = IORESOURCE_IO,
438 .start = 0x3B0,
439 .end = 0x3BB };
434 static struct resource mda2_console_resource = 440 static struct resource mda2_console_resource =
435 { .name = "mda", .start = 0x3BF, .end = 0x3BF }; 441 { .name = "mda",
442 .flags = IORESOURCE_IO,
443 .start = 0x3BF,
444 .end = 0x3BF };
436 vga_video_type = VIDEO_TYPE_MDA; 445 vga_video_type = VIDEO_TYPE_MDA;
437 vga_vram_size = 0x2000; 446 vga_vram_size = 0x2000;
438 display_desc = "*MDA"; 447 display_desc = "*MDA";
@@ -454,15 +463,21 @@ static const char *vgacon_startup(void)
454 vga_vram_size = 0x8000; 463 vga_vram_size = 0x8000;
455 464
456 if (!screen_info.orig_video_isVGA) { 465 if (!screen_info.orig_video_isVGA) {
457 static struct resource ega_console_resource 466 static struct resource ega_console_resource =
458 = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; 467 { .name = "ega",
468 .flags = IORESOURCE_IO,
469 .start = 0x3C0,
470 .end = 0x3DF };
459 vga_video_type = VIDEO_TYPE_EGAC; 471 vga_video_type = VIDEO_TYPE_EGAC;
460 display_desc = "EGA"; 472 display_desc = "EGA";
461 request_resource(&ioport_resource, 473 request_resource(&ioport_resource,
462 &ega_console_resource); 474 &ega_console_resource);
463 } else { 475 } else {
464 static struct resource vga_console_resource 476 static struct resource vga_console_resource =
465 = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; 477 { .name = "vga+",
478 .flags = IORESOURCE_IO,
479 .start = 0x3C0,
480 .end = 0x3DF };
466 vga_video_type = VIDEO_TYPE_VGAC; 481 vga_video_type = VIDEO_TYPE_VGAC;
467 display_desc = "VGA+"; 482 display_desc = "VGA+";
468 request_resource(&ioport_resource, 483 request_resource(&ioport_resource,
@@ -494,7 +509,10 @@ static const char *vgacon_startup(void)
494 } 509 }
495 } else { 510 } else {
496 static struct resource cga_console_resource = 511 static struct resource cga_console_resource =
497 { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; 512 { .name = "cga",
513 .flags = IORESOURCE_IO,
514 .start = 0x3D4,
515 .end = 0x3D5 };
498 vga_video_type = VIDEO_TYPE_CGA; 516 vga_video_type = VIDEO_TYPE_CGA;
499 vga_vram_size = 0x2000; 517 vga_vram_size = 0x2000;
500 display_desc = "*CGA"; 518 display_desc = "*CGA";
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
index 83fec573cceb..57aff7450bce 100644
--- a/drivers/video/fbdev/intelfb/intelfbhw.c
+++ b/drivers/video/fbdev/intelfb/intelfbhw.c
@@ -181,7 +181,9 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
181 return 1; 181 return 1;
182 182
183 /* Find the bridge device. It is always 0:0.0 */ 183 /* Find the bridge device. It is always 0:0.0 */
184 if (!(bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)))) { 184 bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0,
185 PCI_DEVFN(0, 0));
186 if (!bridge_dev) {
185 ERR_MSG("cannot find bridge device\n"); 187 ERR_MSG("cannot find bridge device\n");
186 return 1; 188 return 1;
187 } 189 }
diff --git a/drivers/video/fbdev/nvidia/nv_hw.c b/drivers/video/fbdev/nvidia/nv_hw.c
index 81c80ac3c76f..8335da4ca30e 100644
--- a/drivers/video/fbdev/nvidia/nv_hw.c
+++ b/drivers/video/fbdev/nvidia/nv_hw.c
@@ -683,10 +683,11 @@ static void nForceUpdateArbitrationSettings(unsigned VClk,
683 nv10_sim_state sim_data; 683 nv10_sim_state sim_data;
684 unsigned int M, N, P, pll, MClk, NVClk, memctrl; 684 unsigned int M, N, P, pll, MClk, NVClk, memctrl;
685 struct pci_dev *dev; 685 struct pci_dev *dev;
686 int domain = pci_domain_nr(par->pci_dev->bus);
686 687
687 if ((par->Chipset & 0x0FF0) == 0x01A0) { 688 if ((par->Chipset & 0x0FF0) == 0x01A0) {
688 unsigned int uMClkPostDiv; 689 unsigned int uMClkPostDiv;
689 dev = pci_get_bus_and_slot(0, 3); 690 dev = pci_get_domain_bus_and_slot(domain, 0, 3);
690 pci_read_config_dword(dev, 0x6C, &uMClkPostDiv); 691 pci_read_config_dword(dev, 0x6C, &uMClkPostDiv);
691 uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf; 692 uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf;
692 693
@@ -694,7 +695,7 @@ static void nForceUpdateArbitrationSettings(unsigned VClk,
694 uMClkPostDiv = 4; 695 uMClkPostDiv = 4;
695 MClk = 400000 / uMClkPostDiv; 696 MClk = 400000 / uMClkPostDiv;
696 } else { 697 } else {
697 dev = pci_get_bus_and_slot(0, 5); 698 dev = pci_get_domain_bus_and_slot(domain, 0, 5);
698 pci_read_config_dword(dev, 0x4c, &MClk); 699 pci_read_config_dword(dev, 0x4c, &MClk);
699 MClk /= 1000; 700 MClk /= 1000;
700 } 701 }
@@ -707,13 +708,13 @@ static void nForceUpdateArbitrationSettings(unsigned VClk,
707 sim_data.pix_bpp = (char)pixelDepth; 708 sim_data.pix_bpp = (char)pixelDepth;
708 sim_data.enable_video = 0; 709 sim_data.enable_video = 0;
709 sim_data.enable_mp = 0; 710 sim_data.enable_mp = 0;
710 dev = pci_get_bus_and_slot(0, 1); 711 dev = pci_get_domain_bus_and_slot(domain, 0, 1);
711 pci_read_config_dword(dev, 0x7C, &sim_data.memory_type); 712 pci_read_config_dword(dev, 0x7C, &sim_data.memory_type);
712 pci_dev_put(dev); 713 pci_dev_put(dev);
713 sim_data.memory_type = (sim_data.memory_type >> 12) & 1; 714 sim_data.memory_type = (sim_data.memory_type >> 12) & 1;
714 sim_data.memory_width = 64; 715 sim_data.memory_width = 64;
715 716
716 dev = pci_get_bus_and_slot(0, 3); 717 dev = pci_get_domain_bus_and_slot(domain, 0, 3);
717 pci_read_config_dword(dev, 0, &memctrl); 718 pci_read_config_dword(dev, 0, &memctrl);
718 pci_dev_put(dev); 719 pci_dev_put(dev);
719 memctrl >>= 16; 720 memctrl >>= 16;
@@ -721,7 +722,7 @@ static void nForceUpdateArbitrationSettings(unsigned VClk,
721 if ((memctrl == 0x1A9) || (memctrl == 0x1AB) || (memctrl == 0x1ED)) { 722 if ((memctrl == 0x1A9) || (memctrl == 0x1AB) || (memctrl == 0x1ED)) {
722 u32 dimm[3]; 723 u32 dimm[3];
723 724
724 dev = pci_get_bus_and_slot(0, 2); 725 dev = pci_get_domain_bus_and_slot(domain, 0, 2);
725 pci_read_config_dword(dev, 0x40, &dimm[0]); 726 pci_read_config_dword(dev, 0x40, &dimm[0]);
726 dimm[0] = (dimm[0] >> 8) & 0x4f; 727 dimm[0] = (dimm[0] >> 8) & 0x4f;
727 pci_read_config_dword(dev, 0x44, &dimm[1]); 728 pci_read_config_dword(dev, 0x44, &dimm[1]);
diff --git a/drivers/video/fbdev/nvidia/nv_setup.c b/drivers/video/fbdev/nvidia/nv_setup.c
index 2f2e162134fa..b17acd290360 100644
--- a/drivers/video/fbdev/nvidia/nv_setup.c
+++ b/drivers/video/fbdev/nvidia/nv_setup.c
@@ -264,7 +264,8 @@ static void nv10GetConfig(struct nvidia_par *par)
264 } 264 }
265#endif 265#endif
266 266
267 dev = pci_get_bus_and_slot(0, 1); 267 dev = pci_get_domain_bus_and_slot(pci_domain_nr(par->pci_dev->bus),
268 0, 1);
268 if ((par->Chipset & 0xffff) == 0x01a0) { 269 if ((par->Chipset & 0xffff) == 0x01a0) {
269 u32 amt; 270 u32 amt;
270 271
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 1ea78bb911fb..ff8282374f37 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -780,7 +780,7 @@ static int riva_load_video_mode(struct fb_info *info)
780 else 780 else
781 newmode.misc_output |= 0x80; 781 newmode.misc_output |= 0x80;
782 782
783 rc = CalcStateExt(&par->riva, &newmode.ext, bpp, width, 783 rc = CalcStateExt(&par->riva, &newmode.ext, par->pdev, bpp, width,
784 hDisplaySize, height, dotClock); 784 hDisplaySize, height, dotClock);
785 if (rc) 785 if (rc)
786 goto out; 786 goto out;
diff --git a/drivers/video/fbdev/riva/nv_driver.c b/drivers/video/fbdev/riva/nv_driver.c
index f3694cf17e58..a3d9c66973ad 100644
--- a/drivers/video/fbdev/riva/nv_driver.c
+++ b/drivers/video/fbdev/riva/nv_driver.c
@@ -159,6 +159,7 @@ unsigned long riva_get_memlen(struct riva_par *par)
159 unsigned int chipset = par->Chipset; 159 unsigned int chipset = par->Chipset;
160 struct pci_dev* dev; 160 struct pci_dev* dev;
161 u32 amt; 161 u32 amt;
162 int domain = pci_domain_nr(par->pdev->bus);
162 163
163 switch (chip->Architecture) { 164 switch (chip->Architecture) {
164 case NV_ARCH_03: 165 case NV_ARCH_03:
@@ -226,12 +227,12 @@ unsigned long riva_get_memlen(struct riva_par *par)
226 case NV_ARCH_30: 227 case NV_ARCH_30:
227 if(chipset == NV_CHIP_IGEFORCE2) { 228 if(chipset == NV_CHIP_IGEFORCE2) {
228 229
229 dev = pci_get_bus_and_slot(0, 1); 230 dev = pci_get_domain_bus_and_slot(domain, 0, 1);
230 pci_read_config_dword(dev, 0x7C, &amt); 231 pci_read_config_dword(dev, 0x7C, &amt);
231 pci_dev_put(dev); 232 pci_dev_put(dev);
232 memlen = (((amt >> 6) & 31) + 1) * 1024; 233 memlen = (((amt >> 6) & 31) + 1) * 1024;
233 } else if (chipset == NV_CHIP_0x01F0) { 234 } else if (chipset == NV_CHIP_0x01F0) {
234 dev = pci_get_bus_and_slot(0, 1); 235 dev = pci_get_domain_bus_and_slot(domain, 0, 1);
235 pci_read_config_dword(dev, 0x84, &amt); 236 pci_read_config_dword(dev, 0x84, &amt);
236 pci_dev_put(dev); 237 pci_dev_put(dev);
237 memlen = (((amt >> 4) & 127) + 1) * 1024; 238 memlen = (((amt >> 4) & 127) + 1) * 1024;
@@ -417,6 +418,6 @@ riva_common_setup(struct riva_par *par)
417 } 418 }
418 par->riva.flatPanel = (par->FlatPanel > 0) ? TRUE : FALSE; 419 par->riva.flatPanel = (par->FlatPanel > 0) ? TRUE : FALSE;
419 420
420 RivaGetConfig(&par->riva, par->Chipset); 421 RivaGetConfig(&par->riva, par->pdev, par->Chipset);
421} 422}
422 423
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index 8bdf37f3013b..0601c13f2105 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -1108,7 +1108,8 @@ static void nForceUpdateArbitrationSettings
1108 unsigned pixelDepth, 1108 unsigned pixelDepth,
1109 unsigned *burst, 1109 unsigned *burst,
1110 unsigned *lwm, 1110 unsigned *lwm,
1111 RIVA_HW_INST *chip 1111 RIVA_HW_INST *chip,
1112 struct pci_dev *pdev
1112) 1113)
1113{ 1114{
1114 nv10_fifo_info fifo_data; 1115 nv10_fifo_info fifo_data;
@@ -1116,8 +1117,9 @@ static void nForceUpdateArbitrationSettings
1116 unsigned int M, N, P, pll, MClk, NVClk; 1117 unsigned int M, N, P, pll, MClk, NVClk;
1117 unsigned int uMClkPostDiv; 1118 unsigned int uMClkPostDiv;
1118 struct pci_dev *dev; 1119 struct pci_dev *dev;
1120 int domain = pci_domain_nr(pdev->bus);
1119 1121
1120 dev = pci_get_bus_and_slot(0, 3); 1122 dev = pci_get_domain_bus_and_slot(domain, 0, 3);
1121 pci_read_config_dword(dev, 0x6C, &uMClkPostDiv); 1123 pci_read_config_dword(dev, 0x6C, &uMClkPostDiv);
1122 pci_dev_put(dev); 1124 pci_dev_put(dev);
1123 uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf; 1125 uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf;
@@ -1132,7 +1134,7 @@ static void nForceUpdateArbitrationSettings
1132 sim_data.enable_video = 0; 1134 sim_data.enable_video = 0;
1133 sim_data.enable_mp = 0; 1135 sim_data.enable_mp = 0;
1134 1136
1135 dev = pci_get_bus_and_slot(0, 1); 1137 dev = pci_get_domain_bus_and_slot(domain, 0, 1);
1136 pci_read_config_dword(dev, 0x7C, &sim_data.memory_type); 1138 pci_read_config_dword(dev, 0x7C, &sim_data.memory_type);
1137 pci_dev_put(dev); 1139 pci_dev_put(dev);
1138 sim_data.memory_type = (sim_data.memory_type >> 12) & 1; 1140 sim_data.memory_type = (sim_data.memory_type >> 12) & 1;
@@ -1234,6 +1236,7 @@ int CalcStateExt
1234( 1236(
1235 RIVA_HW_INST *chip, 1237 RIVA_HW_INST *chip,
1236 RIVA_HW_STATE *state, 1238 RIVA_HW_STATE *state,
1239 struct pci_dev *pdev,
1237 int bpp, 1240 int bpp,
1238 int width, 1241 int width,
1239 int hDisplaySize, 1242 int hDisplaySize,
@@ -1300,7 +1303,7 @@ int CalcStateExt
1300 pixelDepth * 8, 1303 pixelDepth * 8,
1301 &(state->arbitration0), 1304 &(state->arbitration0),
1302 &(state->arbitration1), 1305 &(state->arbitration1),
1303 chip); 1306 chip, pdev);
1304 } else { 1307 } else {
1305 nv10UpdateArbitrationSettings(VClk, 1308 nv10UpdateArbitrationSettings(VClk,
1306 pixelDepth * 8, 1309 pixelDepth * 8,
@@ -2102,10 +2105,12 @@ static void nv4GetConfig
2102static void nv10GetConfig 2105static void nv10GetConfig
2103( 2106(
2104 RIVA_HW_INST *chip, 2107 RIVA_HW_INST *chip,
2108 struct pci_dev *pdev,
2105 unsigned int chipset 2109 unsigned int chipset
2106) 2110)
2107{ 2111{
2108 struct pci_dev* dev; 2112 struct pci_dev* dev;
2113 int domain = pci_domain_nr(pdev->bus);
2109 u32 amt; 2114 u32 amt;
2110 2115
2111#ifdef __BIG_ENDIAN 2116#ifdef __BIG_ENDIAN
@@ -2118,12 +2123,12 @@ static void nv10GetConfig
2118 * Fill in chip configuration. 2123 * Fill in chip configuration.
2119 */ 2124 */
2120 if(chipset == NV_CHIP_IGEFORCE2) { 2125 if(chipset == NV_CHIP_IGEFORCE2) {
2121 dev = pci_get_bus_and_slot(0, 1); 2126 dev = pci_get_domain_bus_and_slot(domain, 0, 1);
2122 pci_read_config_dword(dev, 0x7C, &amt); 2127 pci_read_config_dword(dev, 0x7C, &amt);
2123 pci_dev_put(dev); 2128 pci_dev_put(dev);
2124 chip->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024; 2129 chip->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024;
2125 } else if(chipset == NV_CHIP_0x01F0) { 2130 } else if(chipset == NV_CHIP_0x01F0) {
2126 dev = pci_get_bus_and_slot(0, 1); 2131 dev = pci_get_domain_bus_and_slot(domain, 0, 1);
2127 pci_read_config_dword(dev, 0x84, &amt); 2132 pci_read_config_dword(dev, 0x84, &amt);
2128 pci_dev_put(dev); 2133 pci_dev_put(dev);
2129 chip->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024; 2134 chip->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024;
@@ -2224,6 +2229,7 @@ static void nv10GetConfig
2224int RivaGetConfig 2229int RivaGetConfig
2225( 2230(
2226 RIVA_HW_INST *chip, 2231 RIVA_HW_INST *chip,
2232 struct pci_dev *pdev,
2227 unsigned int chipset 2233 unsigned int chipset
2228) 2234)
2229{ 2235{
@@ -2245,7 +2251,7 @@ int RivaGetConfig
2245 case NV_ARCH_10: 2251 case NV_ARCH_10:
2246 case NV_ARCH_20: 2252 case NV_ARCH_20:
2247 case NV_ARCH_30: 2253 case NV_ARCH_30:
2248 nv10GetConfig(chip, chipset); 2254 nv10GetConfig(chip, pdev, chipset);
2249 break; 2255 break;
2250 default: 2256 default:
2251 return (-1); 2257 return (-1);
diff --git a/drivers/video/fbdev/riva/riva_hw.h b/drivers/video/fbdev/riva/riva_hw.h
index c2769f73e0b2..5e7b35466b00 100644
--- a/drivers/video/fbdev/riva/riva_hw.h
+++ b/drivers/video/fbdev/riva/riva_hw.h
@@ -536,6 +536,7 @@ extern int CalcStateExt
536( 536(
537 RIVA_HW_INST *chip, 537 RIVA_HW_INST *chip,
538 RIVA_HW_STATE *state, 538 RIVA_HW_STATE *state,
539 struct pci_dev *pdev,
539 int bpp, 540 int bpp,
540 int width, 541 int width,
541 int hDisplaySize, 542 int hDisplaySize,
@@ -546,7 +547,7 @@ extern int CalcStateExt
546/* 547/*
547 * External routines. 548 * External routines.
548 */ 549 */
549int RivaGetConfig(RIVA_HW_INST *, unsigned int); 550int RivaGetConfig(RIVA_HW_INST *chip, struct pci_dev *pdev, unsigned int c);
550/* 551/*
551 * FIFO Free Count. Should attempt to yield processor if RIVA is busy. 552 * FIFO Free Count. Should attempt to yield processor if RIVA is busy.
552 */ 553 */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index bf588a05d0d0..88865e0ebf4d 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -9,8 +9,7 @@ struct pci_dev;
9struct of_phandle_args; 9struct of_phandle_args;
10struct device_node; 10struct device_node;
11 11
12#ifdef CONFIG_OF_PCI 12#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
13int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
14struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
15 unsigned int devfn); 14 unsigned int devfn);
16int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
@@ -23,11 +22,6 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
23 const char *map_name, const char *map_mask_name, 22 const char *map_name, const char *map_mask_name,
24 struct device_node **target, u32 *id_out); 23 struct device_node **target, u32 *id_out);
25#else 24#else
26static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
27{
28 return 0;
29}
30
31static inline struct device_node *of_pci_find_child_device(struct device_node *parent, 25static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
32 unsigned int devfn) 26 unsigned int devfn)
33{ 27{
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 3cc06b059017..df28af5cef21 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -24,43 +24,12 @@
24#define PCIE_LINK_STATE_CLKPM 4 24#define PCIE_LINK_STATE_CLKPM 4
25 25
26#ifdef CONFIG_PCIEASPM 26#ifdef CONFIG_PCIEASPM
27void pcie_aspm_init_link_state(struct pci_dev *pdev);
28void pcie_aspm_exit_link_state(struct pci_dev *pdev);
29void pcie_aspm_pm_state_change(struct pci_dev *pdev);
30void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
31void pci_disable_link_state(struct pci_dev *pdev, int state); 27void pci_disable_link_state(struct pci_dev *pdev, int state);
32void pci_disable_link_state_locked(struct pci_dev *pdev, int state); 28void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
33void pcie_no_aspm(void); 29void pcie_no_aspm(void);
34#else 30#else
35static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) 31static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { }
36{ 32static inline void pcie_no_aspm(void) { }
37}
38static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev)
39{
40}
41static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
42{
43}
44static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
45{
46}
47static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
48{
49}
50static inline void pcie_no_aspm(void)
51{
52}
53#endif 33#endif
54 34
55#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
56void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
57void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
58#else
59static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
60{
61}
62static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
63{
64}
65#endif
66#endif /* LINUX_ASPM_H */ 35#endif /* LINUX_ASPM_H */
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index d1f9fdade1e0..0dd1a3f7b309 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -17,91 +17,90 @@ static inline void *
17pci_alloc_consistent(struct pci_dev *hwdev, size_t size, 17pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
18 dma_addr_t *dma_handle) 18 dma_addr_t *dma_handle)
19{ 19{
20 return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); 20 return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
21} 21}
22 22
23static inline void * 23static inline void *
24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, 24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
25 dma_addr_t *dma_handle) 25 dma_addr_t *dma_handle)
26{ 26{
27 return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, 27 return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
28 size, dma_handle, GFP_ATOMIC);
29} 28}
30 29
31static inline void 30static inline void
32pci_free_consistent(struct pci_dev *hwdev, size_t size, 31pci_free_consistent(struct pci_dev *hwdev, size_t size,
33 void *vaddr, dma_addr_t dma_handle) 32 void *vaddr, dma_addr_t dma_handle)
34{ 33{
35 dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); 34 dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
36} 35}
37 36
38static inline dma_addr_t 37static inline dma_addr_t
39pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) 38pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
40{ 39{
41 return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); 40 return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
42} 41}
43 42
44static inline void 43static inline void
45pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, 44pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
46 size_t size, int direction) 45 size_t size, int direction)
47{ 46{
48 dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); 47 dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
49} 48}
50 49
51static inline dma_addr_t 50static inline dma_addr_t
52pci_map_page(struct pci_dev *hwdev, struct page *page, 51pci_map_page(struct pci_dev *hwdev, struct page *page,
53 unsigned long offset, size_t size, int direction) 52 unsigned long offset, size_t size, int direction)
54{ 53{
55 return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); 54 return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
56} 55}
57 56
58static inline void 57static inline void
59pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, 58pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
60 size_t size, int direction) 59 size_t size, int direction)
61{ 60{
62 dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); 61 dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
63} 62}
64 63
65static inline int 64static inline int
66pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, 65pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
67 int nents, int direction) 66 int nents, int direction)
68{ 67{
69 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); 68 return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
70} 69}
71 70
72static inline void 71static inline void
73pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, 72pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
74 int nents, int direction) 73 int nents, int direction)
75{ 74{
76 dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); 75 dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
77} 76}
78 77
79static inline void 78static inline void
80pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, 79pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
81 size_t size, int direction) 80 size_t size, int direction)
82{ 81{
83 dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); 82 dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
84} 83}
85 84
86static inline void 85static inline void
87pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, 86pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
88 size_t size, int direction) 87 size_t size, int direction)
89{ 88{
90 dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); 89 dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
91} 90}
92 91
93static inline void 92static inline void
94pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, 93pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
95 int nelems, int direction) 94 int nelems, int direction)
96{ 95{
97 dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); 96 dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
98} 97}
99 98
100static inline void 99static inline void
101pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, 100pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
102 int nelems, int direction) 101 int nelems, int direction)
103{ 102{
104 dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); 103 dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
105} 104}
106 105
107static inline int 106static inline int
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 573730d490a7..a1a5e5df0f66 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -36,17 +36,20 @@ enum pci_epc_irq_type {
36 * @owner: the module owner containing the ops 36 * @owner: the module owner containing the ops
37 */ 37 */
38struct pci_epc_ops { 38struct pci_epc_ops {
39 int (*write_header)(struct pci_epc *pci_epc, 39 int (*write_header)(struct pci_epc *epc, u8 func_no,
40 struct pci_epf_header *hdr); 40 struct pci_epf_header *hdr);
41 int (*set_bar)(struct pci_epc *epc, enum pci_barno bar, 41 int (*set_bar)(struct pci_epc *epc, u8 func_no,
42 enum pci_barno bar,
42 dma_addr_t bar_phys, size_t size, int flags); 43 dma_addr_t bar_phys, size_t size, int flags);
43 void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar); 44 void (*clear_bar)(struct pci_epc *epc, u8 func_no,
44 int (*map_addr)(struct pci_epc *epc, phys_addr_t addr, 45 enum pci_barno bar);
45 u64 pci_addr, size_t size); 46 int (*map_addr)(struct pci_epc *epc, u8 func_no,
46 void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr); 47 phys_addr_t addr, u64 pci_addr, size_t size);
47 int (*set_msi)(struct pci_epc *epc, u8 interrupts); 48 void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
48 int (*get_msi)(struct pci_epc *epc); 49 phys_addr_t addr);
49 int (*raise_irq)(struct pci_epc *pci_epc, 50 int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
51 int (*get_msi)(struct pci_epc *epc, u8 func_no);
52 int (*raise_irq)(struct pci_epc *epc, u8 func_no,
50 enum pci_epc_irq_type type, u8 interrupt_num); 53 enum pci_epc_irq_type type, u8 interrupt_num);
51 int (*start)(struct pci_epc *epc); 54 int (*start)(struct pci_epc *epc);
52 void (*stop)(struct pci_epc *epc); 55 void (*stop)(struct pci_epc *epc);
@@ -121,17 +124,21 @@ void pci_epc_destroy(struct pci_epc *epc);
121int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); 124int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
122void pci_epc_linkup(struct pci_epc *epc); 125void pci_epc_linkup(struct pci_epc *epc);
123void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); 126void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
124int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr); 127int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
125int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, 128 struct pci_epf_header *hdr);
129int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
130 enum pci_barno bar,
126 dma_addr_t bar_phys, size_t size, int flags); 131 dma_addr_t bar_phys, size_t size, int flags);
127void pci_epc_clear_bar(struct pci_epc *epc, int bar); 132void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar);
128int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, 133int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
134 phys_addr_t phys_addr,
129 u64 pci_addr, size_t size); 135 u64 pci_addr, size_t size);
130void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr); 136void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
131int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts); 137 phys_addr_t phys_addr);
132int pci_epc_get_msi(struct pci_epc *epc); 138int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
133int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, 139int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
134 u8 interrupt_num); 140int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
141 enum pci_epc_irq_type type, u8 interrupt_num);
135int pci_epc_start(struct pci_epc *epc); 142int pci_epc_start(struct pci_epc *epc);
136void pci_epc_stop(struct pci_epc *epc); 143void pci_epc_stop(struct pci_epc *epc);
137struct pci_epc *pci_epc_get(const char *epc_name); 144struct pci_epc *pci_epc_get(const char *epc_name);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c170c9250c8b..43dcecd6533d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -48,17 +48,17 @@
48 * In the interest of not exposing interfaces to user-space unnecessarily, 48 * In the interest of not exposing interfaces to user-space unnecessarily,
49 * the following kernel-only defines are being added here. 49 * the following kernel-only defines are being added here.
50 */ 50 */
51#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) 51#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
52/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 52/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
53#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) 53#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
54 54
55/* pci_slot represents a physical slot */ 55/* pci_slot represents a physical slot */
56struct pci_slot { 56struct pci_slot {
57 struct pci_bus *bus; /* The bus this slot is on */ 57 struct pci_bus *bus; /* Bus this slot is on */
58 struct list_head list; /* node in list of slots on this bus */ 58 struct list_head list; /* Node in list of slots */
59 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ 59 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
60 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ 60 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
61 struct kobject kobj; 61 struct kobject kobj;
62}; 62};
63 63
64static inline const char *pci_slot_name(const struct pci_slot *slot) 64static inline const char *pci_slot_name(const struct pci_slot *slot)
@@ -72,9 +72,7 @@ enum pci_mmap_state {
72 pci_mmap_mem 72 pci_mmap_mem
73}; 73};
74 74
75/* 75/* For PCI devices, the region numbers are assigned this way: */
76 * For PCI devices, the region numbers are assigned this way:
77 */
78enum { 76enum {
79 /* #0-5: standard PCI resources */ 77 /* #0-5: standard PCI resources */
80 PCI_STD_RESOURCES, 78 PCI_STD_RESOURCES,
@@ -83,23 +81,23 @@ enum {
83 /* #6: expansion ROM resource */ 81 /* #6: expansion ROM resource */
84 PCI_ROM_RESOURCE, 82 PCI_ROM_RESOURCE,
85 83
86 /* device specific resources */ 84 /* Device-specific resources */
87#ifdef CONFIG_PCI_IOV 85#ifdef CONFIG_PCI_IOV
88 PCI_IOV_RESOURCES, 86 PCI_IOV_RESOURCES,
89 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, 87 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
90#endif 88#endif
91 89
92 /* resources assigned to buses behind the bridge */ 90 /* Resources assigned to buses behind the bridge */
93#define PCI_BRIDGE_RESOURCE_NUM 4 91#define PCI_BRIDGE_RESOURCE_NUM 4
94 92
95 PCI_BRIDGE_RESOURCES, 93 PCI_BRIDGE_RESOURCES,
96 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + 94 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
97 PCI_BRIDGE_RESOURCE_NUM - 1, 95 PCI_BRIDGE_RESOURCE_NUM - 1,
98 96
99 /* total resources associated with a PCI device */ 97 /* Total resources associated with a PCI device */
100 PCI_NUM_RESOURCES, 98 PCI_NUM_RESOURCES,
101 99
102 /* preserve this for compatibility */ 100 /* Preserve this for compatibility */
103 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, 101 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
104}; 102};
105 103
@@ -152,9 +150,10 @@ static inline const char *pci_power_name(pci_power_t state)
152#define PCI_PM_D3COLD_WAIT 100 150#define PCI_PM_D3COLD_WAIT 100
153#define PCI_PM_BUS_WAIT 50 151#define PCI_PM_BUS_WAIT 50
154 152
155/** The pci_channel state describes connectivity between the CPU and 153/**
156 * the pci device. If some PCI bus between here and the pci device 154 * The pci_channel state describes connectivity between the CPU and
157 * has crashed or locked up, this info is reflected here. 155 * the PCI device. If some PCI bus between here and the PCI device
156 * has crashed or locked up, this info is reflected here.
158 */ 157 */
159typedef unsigned int __bitwise pci_channel_state_t; 158typedef unsigned int __bitwise pci_channel_state_t;
160 159
@@ -184,9 +183,7 @@ enum pcie_reset_state {
184 183
185typedef unsigned short __bitwise pci_dev_flags_t; 184typedef unsigned short __bitwise pci_dev_flags_t;
186enum pci_dev_flags { 185enum pci_dev_flags {
187 /* INTX_DISABLE in PCI_COMMAND register disables MSI 186 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
188 * generation too.
189 */
190 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), 187 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
191 /* Device configuration is irrevocably lost if disabled into D3 */ 188 /* Device configuration is irrevocably lost if disabled into D3 */
192 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), 189 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
@@ -202,7 +199,7 @@ enum pci_dev_flags {
202 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), 199 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
203 /* Get VPD from function 0 VPD */ 200 /* Get VPD from function 0 VPD */
204 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), 201 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
205 /* a non-root bridge where translation occurs, stop alias search here */ 202 /* A non-root bridge where translation occurs, stop alias search here */
206 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), 203 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
207 /* Do not use FLR even if device advertises PCI_AF_CAP */ 204 /* Do not use FLR even if device advertises PCI_AF_CAP */
208 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), 205 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
@@ -222,17 +219,17 @@ enum pci_bus_flags {
222 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, 219 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
223}; 220};
224 221
225/* These values come from the PCI Express Spec */ 222/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
226enum pcie_link_width { 223enum pcie_link_width {
227 PCIE_LNK_WIDTH_RESRV = 0x00, 224 PCIE_LNK_WIDTH_RESRV = 0x00,
228 PCIE_LNK_X1 = 0x01, 225 PCIE_LNK_X1 = 0x01,
229 PCIE_LNK_X2 = 0x02, 226 PCIE_LNK_X2 = 0x02,
230 PCIE_LNK_X4 = 0x04, 227 PCIE_LNK_X4 = 0x04,
231 PCIE_LNK_X8 = 0x08, 228 PCIE_LNK_X8 = 0x08,
232 PCIE_LNK_X12 = 0x0C, 229 PCIE_LNK_X12 = 0x0c,
233 PCIE_LNK_X16 = 0x10, 230 PCIE_LNK_X16 = 0x10,
234 PCIE_LNK_X32 = 0x20, 231 PCIE_LNK_X32 = 0x20,
235 PCIE_LNK_WIDTH_UNKNOWN = 0xFF, 232 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
236}; 233};
237 234
238/* Based on the PCI Hotplug Spec, but some values are made up by us */ 235/* Based on the PCI Hotplug Spec, but some values are made up by us */
@@ -263,15 +260,15 @@ enum pci_bus_speed {
263}; 260};
264 261
265struct pci_cap_saved_data { 262struct pci_cap_saved_data {
266 u16 cap_nr; 263 u16 cap_nr;
267 bool cap_extended; 264 bool cap_extended;
268 unsigned int size; 265 unsigned int size;
269 u32 data[0]; 266 u32 data[0];
270}; 267};
271 268
272struct pci_cap_saved_state { 269struct pci_cap_saved_state {
273 struct hlist_node next; 270 struct hlist_node next;
274 struct pci_cap_saved_data cap; 271 struct pci_cap_saved_data cap;
275}; 272};
276 273
277struct irq_affinity; 274struct irq_affinity;
@@ -280,19 +277,17 @@ struct pci_vpd;
280struct pci_sriov; 277struct pci_sriov;
281struct pci_ats; 278struct pci_ats;
282 279
283/* 280/* The pci_dev structure describes PCI devices */
284 * The pci_dev structure is used to describe PCI devices.
285 */
286struct pci_dev { 281struct pci_dev {
287 struct list_head bus_list; /* node in per-bus list */ 282 struct list_head bus_list; /* Node in per-bus list */
288 struct pci_bus *bus; /* bus this device is on */ 283 struct pci_bus *bus; /* Bus this device is on */
289 struct pci_bus *subordinate; /* bus this device bridges to */ 284 struct pci_bus *subordinate; /* Bus this device bridges to */
290 285
291 void *sysdata; /* hook for sys-specific extension */ 286 void *sysdata; /* Hook for sys-specific extension */
292 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ 287 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
293 struct pci_slot *slot; /* Physical slot this device is in */ 288 struct pci_slot *slot; /* Physical slot this device is in */
294 289
295 unsigned int devfn; /* encoded device & function index */ 290 unsigned int devfn; /* Encoded device & function index */
296 unsigned short vendor; 291 unsigned short vendor;
297 unsigned short device; 292 unsigned short device;
298 unsigned short subsystem_vendor; 293 unsigned short subsystem_vendor;
@@ -307,12 +302,12 @@ struct pci_dev {
307 u8 msi_cap; /* MSI capability offset */ 302 u8 msi_cap; /* MSI capability offset */
308 u8 msix_cap; /* MSI-X capability offset */ 303 u8 msix_cap; /* MSI-X capability offset */
309 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ 304 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
310 u8 rom_base_reg; /* which config register controls the ROM */ 305 u8 rom_base_reg; /* Config register controlling ROM */
311 u8 pin; /* which interrupt pin this device uses */ 306 u8 pin; /* Interrupt pin this device uses */
312 u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ 307 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
313 unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ 308 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
314 309
315 struct pci_driver *driver; /* which driver has allocated this device */ 310 struct pci_driver *driver; /* Driver bound to this device */
316 u64 dma_mask; /* Mask of the bits of bus address this 311 u64 dma_mask; /* Mask of the bits of bus address this
317 device implements. Normally this is 312 device implements. Normally this is
318 0xffffffff. You only need to change 313 0xffffffff. You only need to change
@@ -321,9 +316,9 @@ struct pci_dev {
321 316
322 struct device_dma_parameters dma_parms; 317 struct device_dma_parameters dma_parms;
323 318
324 pci_power_t current_state; /* Current operating state. In ACPI-speak, 319 pci_power_t current_state; /* Current operating state. In ACPI,
325 this is D0-D3, D0 being fully functional, 320 this is D0-D3, D0 being fully
326 and D3 being off. */ 321 functional, and D3 being off. */
327 u8 pm_cap; /* PM capability offset */ 322 u8 pm_cap; /* PM capability offset */
328 unsigned int pme_support:5; /* Bitmask of states from which PME# 323 unsigned int pme_support:5; /* Bitmask of states from which PME#
329 can be generated */ 324 can be generated */
@@ -334,10 +329,10 @@ struct pci_dev {
334 unsigned int no_d3cold:1; /* D3cold is forbidden */ 329 unsigned int no_d3cold:1; /* D3cold is forbidden */
335 unsigned int bridge_d3:1; /* Allow D3 for bridge */ 330 unsigned int bridge_d3:1; /* Allow D3 for bridge */
336 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ 331 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
337 unsigned int mmio_always_on:1; /* disallow turning off io/mem 332 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
338 decoding during bar sizing */ 333 decoding during BAR sizing */
339 unsigned int wakeup_prepared:1; 334 unsigned int wakeup_prepared:1;
340 unsigned int runtime_d3cold:1; /* whether go through runtime 335 unsigned int runtime_d3cold:1; /* Whether go through runtime
341 D3cold, not set for devices 336 D3cold, not set for devices
342 powered on/off by the 337 powered on/off by the
343 corresponding bridge */ 338 corresponding bridge */
@@ -350,12 +345,14 @@ struct pci_dev {
350 345
351#ifdef CONFIG_PCIEASPM 346#ifdef CONFIG_PCIEASPM
352 struct pcie_link_state *link_state; /* ASPM link state */ 347 struct pcie_link_state *link_state; /* ASPM link state */
348 unsigned int ltr_path:1; /* Latency Tolerance Reporting
349 supported from root to here */
353#endif 350#endif
354 351
355 pci_channel_state_t error_state; /* current connectivity state */ 352 pci_channel_state_t error_state; /* Current connectivity state */
356 struct device dev; /* Generic device interface */ 353 struct device dev; /* Generic device interface */
357 354
358 int cfg_size; /* Size of configuration space */ 355 int cfg_size; /* Size of config space */
359 356
360 /* 357 /*
361 * Instead of touching interrupt line and base address registers 358 * Instead of touching interrupt line and base address registers
@@ -364,47 +361,47 @@ struct pci_dev {
364 unsigned int irq; 361 unsigned int irq;
365 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ 362 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
366 363
367 bool match_driver; /* Skip attaching driver */ 364 bool match_driver; /* Skip attaching driver */
368 /* These fields are used by common fixups */ 365
369 unsigned int transparent:1; /* Subtractive decode PCI bridge */ 366 unsigned int transparent:1; /* Subtractive decode bridge */
370 unsigned int multifunction:1;/* Part of multi-function device */ 367 unsigned int multifunction:1; /* Multi-function device */
371 /* keep track of device state */ 368
372 unsigned int is_added:1; 369 unsigned int is_added:1;
373 unsigned int is_busmaster:1; /* device is busmaster */ 370 unsigned int is_busmaster:1; /* Is busmaster */
374 unsigned int no_msi:1; /* device may not use msi */ 371 unsigned int no_msi:1; /* May not use MSI */
375 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ 372 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
376 unsigned int block_cfg_access:1; /* config space access is blocked */ 373 unsigned int block_cfg_access:1; /* Config space access blocked */
377 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 374 unsigned int broken_parity_status:1; /* Generates false positive parity */
378 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 375 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
379 unsigned int msi_enabled:1; 376 unsigned int msi_enabled:1;
380 unsigned int msix_enabled:1; 377 unsigned int msix_enabled:1;
381 unsigned int ari_enabled:1; /* ARI forwarding */ 378 unsigned int ari_enabled:1; /* ARI forwarding */
382 unsigned int ats_enabled:1; /* Address Translation Service */ 379 unsigned int ats_enabled:1; /* Address Translation Svc */
383 unsigned int pasid_enabled:1; /* Process Address Space ID */ 380 unsigned int pasid_enabled:1; /* Process Address Space ID */
384 unsigned int pri_enabled:1; /* Page Request Interface */ 381 unsigned int pri_enabled:1; /* Page Request Interface */
385 unsigned int is_managed:1; 382 unsigned int is_managed:1;
386 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 383 unsigned int needs_freset:1; /* Requires fundamental reset */
387 unsigned int state_saved:1; 384 unsigned int state_saved:1;
388 unsigned int is_physfn:1; 385 unsigned int is_physfn:1;
389 unsigned int is_virtfn:1; 386 unsigned int is_virtfn:1;
390 unsigned int reset_fn:1; 387 unsigned int reset_fn:1;
391 unsigned int is_hotplug_bridge:1; 388 unsigned int is_hotplug_bridge:1;
392 unsigned int is_thunderbolt:1; /* Thunderbolt controller */ 389 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
393 unsigned int __aer_firmware_first_valid:1; 390 unsigned int __aer_firmware_first_valid:1;
394 unsigned int __aer_firmware_first:1; 391 unsigned int __aer_firmware_first:1;
395 unsigned int broken_intx_masking:1; /* INTx masking can't be used */ 392 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
396 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 393 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
397 unsigned int irq_managed:1; 394 unsigned int irq_managed:1;
398 unsigned int has_secondary_link:1; 395 unsigned int has_secondary_link:1;
399 unsigned int non_compliant_bars:1; /* broken BARs; ignore them */ 396 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
400 unsigned int is_probed:1; /* device probing in progress */ 397 unsigned int is_probed:1; /* Device probing in progress */
401 pci_dev_flags_t dev_flags; 398 pci_dev_flags_t dev_flags;
402 atomic_t enable_cnt; /* pci_enable_device has been called */ 399 atomic_t enable_cnt; /* pci_enable_device has been called */
403 400
404 u32 saved_config_space[16]; /* config space saved at suspend time */ 401 u32 saved_config_space[16]; /* Config space saved at suspend time */
405 struct hlist_head saved_cap_space; 402 struct hlist_head saved_cap_space;
406 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ 403 struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */
407 int rom_attr_enabled; /* has display of the rom attribute been enabled? */ 404 int rom_attr_enabled; /* Display of ROM attribute enabled? */
408 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 405 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
409 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 406 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
410 407
@@ -419,12 +416,12 @@ struct pci_dev {
419 struct pci_vpd *vpd; 416 struct pci_vpd *vpd;
420#ifdef CONFIG_PCI_ATS 417#ifdef CONFIG_PCI_ATS
421 union { 418 union {
422 struct pci_sriov *sriov; /* SR-IOV capability related */ 419 struct pci_sriov *sriov; /* PF: SR-IOV info */
423 struct pci_dev *physfn; /* the PF this VF is associated with */ 420 struct pci_dev *physfn; /* VF: related PF */
424 }; 421 };
425 u16 ats_cap; /* ATS Capability offset */ 422 u16 ats_cap; /* ATS Capability offset */
426 u8 ats_stu; /* ATS Smallest Translation Unit */ 423 u8 ats_stu; /* ATS Smallest Translation Unit */
427 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ 424 atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */
428#endif 425#endif
429#ifdef CONFIG_PCI_PRI 426#ifdef CONFIG_PCI_PRI
430 u32 pri_reqs_alloc; /* Number of PRI requests allocated */ 427 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
@@ -432,11 +429,11 @@ struct pci_dev {
432#ifdef CONFIG_PCI_PASID 429#ifdef CONFIG_PCI_PASID
433 u16 pasid_features; 430 u16 pasid_features;
434#endif 431#endif
435 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ 432 phys_addr_t rom; /* Physical address if not from BAR */
436 size_t romlen; /* Length of ROM if it's not from the BAR */ 433 size_t romlen; /* Length if not from BAR */
437 char *driver_override; /* Driver name to force a match */ 434 char *driver_override; /* Driver name to force a match */
438 435
439 unsigned long priv_flags; /* Private flags for the pci driver */ 436 unsigned long priv_flags; /* Private flags for the PCI driver */
440}; 437};
441 438
442static inline struct pci_dev *pci_physfn(struct pci_dev *dev) 439static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -459,26 +456,26 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
459} 456}
460 457
461struct pci_host_bridge { 458struct pci_host_bridge {
462 struct device dev; 459 struct device dev;
463 struct pci_bus *bus; /* root bus */ 460 struct pci_bus *bus; /* Root bus */
464 struct pci_ops *ops; 461 struct pci_ops *ops;
465 void *sysdata; 462 void *sysdata;
466 int busnr; 463 int busnr;
467 struct list_head windows; /* resource_entry */ 464 struct list_head windows; /* resource_entry */
468 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */ 465 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
469 int (*map_irq)(const struct pci_dev *, u8, u8); 466 int (*map_irq)(const struct pci_dev *, u8, u8);
470 void (*release_fn)(struct pci_host_bridge *); 467 void (*release_fn)(struct pci_host_bridge *);
471 void *release_data; 468 void *release_data;
472 struct msi_controller *msi; 469 struct msi_controller *msi;
473 unsigned int ignore_reset_delay:1; /* for entire hierarchy */ 470 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
474 unsigned int no_ext_tags:1; /* no Extended Tags */ 471 unsigned int no_ext_tags:1; /* No Extended Tags */
475 /* Resource alignment requirements */ 472 /* Resource alignment requirements */
476 resource_size_t (*align_resource)(struct pci_dev *dev, 473 resource_size_t (*align_resource)(struct pci_dev *dev,
477 const struct resource *res, 474 const struct resource *res,
478 resource_size_t start, 475 resource_size_t start,
479 resource_size_t size, 476 resource_size_t size,
480 resource_size_t align); 477 resource_size_t align);
481 unsigned long private[0] ____cacheline_aligned; 478 unsigned long private[0] ____cacheline_aligned;
482}; 479};
483 480
484#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) 481#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -500,8 +497,8 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge);
500struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); 497struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
501 498
502void pci_set_host_bridge_release(struct pci_host_bridge *bridge, 499void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
503 void (*release_fn)(struct pci_host_bridge *), 500 void (*release_fn)(struct pci_host_bridge *),
504 void *release_data); 501 void *release_data);
505 502
506int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); 503int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
507 504
@@ -521,32 +518,32 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
521#define PCI_SUBTRACTIVE_DECODE 0x1 518#define PCI_SUBTRACTIVE_DECODE 0x1
522 519
523struct pci_bus_resource { 520struct pci_bus_resource {
524 struct list_head list; 521 struct list_head list;
525 struct resource *res; 522 struct resource *res;
526 unsigned int flags; 523 unsigned int flags;
527}; 524};
528 525
529#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ 526#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
530 527
531struct pci_bus { 528struct pci_bus {
532 struct list_head node; /* node in list of buses */ 529 struct list_head node; /* Node in list of buses */
533 struct pci_bus *parent; /* parent bus this bridge is on */ 530 struct pci_bus *parent; /* Parent bus this bridge is on */
534 struct list_head children; /* list of child buses */ 531 struct list_head children; /* List of child buses */
535 struct list_head devices; /* list of devices on this bus */ 532 struct list_head devices; /* List of devices on this bus */
536 struct pci_dev *self; /* bridge device as seen by parent */ 533 struct pci_dev *self; /* Bridge device as seen by parent */
537 struct list_head slots; /* list of slots on this bus; 534 struct list_head slots; /* List of slots on this bus;
538 protected by pci_slot_mutex */ 535 protected by pci_slot_mutex */
539 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; 536 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
540 struct list_head resources; /* address space routed to this bus */ 537 struct list_head resources; /* Address space routed to this bus */
541 struct resource busn_res; /* bus numbers routed to this bus */ 538 struct resource busn_res; /* Bus numbers routed to this bus */
542 539
543 struct pci_ops *ops; /* configuration access functions */ 540 struct pci_ops *ops; /* Configuration access functions */
544 struct msi_controller *msi; /* MSI controller */ 541 struct msi_controller *msi; /* MSI controller */
545 void *sysdata; /* hook for sys-specific extension */ 542 void *sysdata; /* Hook for sys-specific extension */
546 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ 543 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
547 544
548 unsigned char number; /* bus number */ 545 unsigned char number; /* Bus number */
549 unsigned char primary; /* number of primary bridge */ 546 unsigned char primary; /* Number of primary bridge */
550 unsigned char max_bus_speed; /* enum pci_bus_speed */ 547 unsigned char max_bus_speed; /* enum pci_bus_speed */
551 unsigned char cur_bus_speed; /* enum pci_bus_speed */ 548 unsigned char cur_bus_speed; /* enum pci_bus_speed */
552#ifdef CONFIG_PCI_DOMAINS_GENERIC 549#ifdef CONFIG_PCI_DOMAINS_GENERIC
@@ -555,12 +552,12 @@ struct pci_bus {
555 552
556 char name[48]; 553 char name[48];
557 554
558 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ 555 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
559 pci_bus_flags_t bus_flags; /* inherited by child buses */ 556 pci_bus_flags_t bus_flags; /* Inherited by child buses */
560 struct device *bridge; 557 struct device *bridge;
561 struct device dev; 558 struct device dev;
562 struct bin_attribute *legacy_io; /* legacy I/O for this bus */ 559 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
563 struct bin_attribute *legacy_mem; /* legacy mem */ 560 struct bin_attribute *legacy_mem; /* Legacy mem */
564 unsigned int is_added:1; 561 unsigned int is_added:1;
565}; 562};
566 563
@@ -617,9 +614,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
617static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } 614static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
618#endif 615#endif
619 616
620/* 617/* Error values that may be returned by PCI functions */
621 * Error values that may be returned by PCI functions.
622 */
623#define PCIBIOS_SUCCESSFUL 0x00 618#define PCIBIOS_SUCCESSFUL 0x00
624#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 619#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
625#define PCIBIOS_BAD_VENDOR_ID 0x83 620#define PCIBIOS_BAD_VENDOR_ID 0x83
@@ -628,9 +623,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
628#define PCIBIOS_SET_FAILED 0x88 623#define PCIBIOS_SET_FAILED 0x88
629#define PCIBIOS_BUFFER_TOO_SMALL 0x89 624#define PCIBIOS_BUFFER_TOO_SMALL 0x89
630 625
631/* 626/* Translate above to generic errno for passing back through non-PCI code */
632 * Translate above to generic errno for passing back through non-PCI code.
633 */
634static inline int pcibios_err_to_errno(int err) 627static inline int pcibios_err_to_errno(int err)
635{ 628{
636 if (err <= PCIBIOS_SUCCESSFUL) 629 if (err <= PCIBIOS_SUCCESSFUL)
@@ -680,13 +673,13 @@ typedef u32 pci_bus_addr_t;
680#endif 673#endif
681 674
682struct pci_bus_region { 675struct pci_bus_region {
683 pci_bus_addr_t start; 676 pci_bus_addr_t start;
684 pci_bus_addr_t end; 677 pci_bus_addr_t end;
685}; 678};
686 679
687struct pci_dynids { 680struct pci_dynids {
688 spinlock_t lock; /* protects list, index */ 681 spinlock_t lock; /* Protects list, index */
689 struct list_head list; /* for IDs added at runtime */ 682 struct list_head list; /* For IDs added at runtime */
690}; 683};
691 684
692 685
@@ -700,13 +693,13 @@ struct pci_dynids {
700typedef unsigned int __bitwise pci_ers_result_t; 693typedef unsigned int __bitwise pci_ers_result_t;
701 694
702enum pci_ers_result { 695enum pci_ers_result {
703 /* no result/none/not supported in device driver */ 696 /* No result/none/not supported in device driver */
704 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, 697 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
705 698
706 /* Device driver can recover without slot reset */ 699 /* Device driver can recover without slot reset */
707 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, 700 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
708 701
709 /* Device driver wants slot to be reset. */ 702 /* Device driver wants slot to be reset */
710 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, 703 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
711 704
712 /* Device has completely failed, is unrecoverable */ 705 /* Device has completely failed, is unrecoverable */
@@ -742,27 +735,27 @@ struct pci_error_handlers {
742 735
743struct module; 736struct module;
744struct pci_driver { 737struct pci_driver {
745 struct list_head node; 738 struct list_head node;
746 const char *name; 739 const char *name;
747 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ 740 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
748 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ 741 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
749 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ 742 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
750 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ 743 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
751 int (*suspend_late) (struct pci_dev *dev, pm_message_t state); 744 int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
752 int (*resume_early) (struct pci_dev *dev); 745 int (*resume_early)(struct pci_dev *dev);
753 int (*resume) (struct pci_dev *dev); /* Device woken up */ 746 int (*resume) (struct pci_dev *dev); /* Device woken up */
754 void (*shutdown) (struct pci_dev *dev); 747 void (*shutdown) (struct pci_dev *dev);
755 int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */ 748 int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */
756 const struct pci_error_handlers *err_handler; 749 const struct pci_error_handlers *err_handler;
757 const struct attribute_group **groups; 750 const struct attribute_group **groups;
758 struct device_driver driver; 751 struct device_driver driver;
759 struct pci_dynids dynids; 752 struct pci_dynids dynids;
760}; 753};
761 754
762#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) 755#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
763 756
764/** 757/**
765 * PCI_DEVICE - macro used to describe a specific pci device 758 * PCI_DEVICE - macro used to describe a specific PCI device
766 * @vend: the 16 bit PCI Vendor ID 759 * @vend: the 16 bit PCI Vendor ID
767 * @dev: the 16 bit PCI Device ID 760 * @dev: the 16 bit PCI Device ID
768 * 761 *
@@ -775,7 +768,7 @@ struct pci_driver {
775 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID 768 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
776 769
777/** 770/**
778 * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem 771 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
779 * @vend: the 16 bit PCI Vendor ID 772 * @vend: the 16 bit PCI Vendor ID
780 * @dev: the 16 bit PCI Device ID 773 * @dev: the 16 bit PCI Device ID
781 * @subvend: the 16 bit PCI Subvendor ID 774 * @subvend: the 16 bit PCI Subvendor ID
@@ -789,7 +782,7 @@ struct pci_driver {
789 .subvendor = (subvend), .subdevice = (subdev) 782 .subvendor = (subvend), .subdevice = (subdev)
790 783
791/** 784/**
792 * PCI_DEVICE_CLASS - macro used to describe a specific pci device class 785 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
793 * @dev_class: the class, subclass, prog-if triple for this device 786 * @dev_class: the class, subclass, prog-if triple for this device
794 * @dev_class_mask: the class mask for this device 787 * @dev_class_mask: the class mask for this device
795 * 788 *
@@ -803,7 +796,7 @@ struct pci_driver {
803 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID 796 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
804 797
805/** 798/**
806 * PCI_VDEVICE - macro used to describe a specific pci device in short form 799 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
807 * @vend: the vendor name 800 * @vend: the vendor name
808 * @dev: the 16 bit PCI Device ID 801 * @dev: the 16 bit PCI Device ID
809 * 802 *
@@ -812,22 +805,21 @@ struct pci_driver {
812 * to PCI_ANY_ID. The macro allows the next field to follow as the device 805 * to PCI_ANY_ID. The macro allows the next field to follow as the device
813 * private data. 806 * private data.
814 */ 807 */
815
816#define PCI_VDEVICE(vend, dev) \ 808#define PCI_VDEVICE(vend, dev) \
817 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ 809 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
818 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 810 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
819 811
820enum { 812enum {
821 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */ 813 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
822 PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */ 814 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
823 PCI_PROBE_ONLY = 0x00000004, /* use existing setup */ 815 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
824 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */ 816 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
825 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */ 817 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
826 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ 818 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
827 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */ 819 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
828}; 820};
829 821
830/* these external functions are only available when PCI support is enabled */ 822/* These external functions are only available when PCI support is enabled */
831#ifdef CONFIG_PCI 823#ifdef CONFIG_PCI
832 824
833extern unsigned int pci_flags; 825extern unsigned int pci_flags;
@@ -840,11 +832,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; }
840void pcie_bus_configure_settings(struct pci_bus *bus); 832void pcie_bus_configure_settings(struct pci_bus *bus);
841 833
842enum pcie_bus_config_types { 834enum pcie_bus_config_types {
843 PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */ 835 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
844 PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */ 836 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
845 PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */ 837 PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
846 PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */ 838 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
847 PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */ 839 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
848}; 840};
849 841
850extern enum pcie_bus_config_types pcie_bus_config; 842extern enum pcie_bus_config_types pcie_bus_config;
@@ -853,7 +845,7 @@ extern struct bus_type pci_bus_type;
853 845
854/* Do NOT directly access these two variables, unless you are arch-specific PCI 846/* Do NOT directly access these two variables, unless you are arch-specific PCI
855 * code, or PCI core code. */ 847 * code, or PCI core code. */
856extern struct list_head pci_root_buses; /* list of all known PCI buses */ 848extern struct list_head pci_root_buses; /* List of all known PCI buses */
857/* Some device drivers need know if PCI is initiated */ 849/* Some device drivers need know if PCI is initiated */
858int no_pci_devices(void); 850int no_pci_devices(void);
859 851
@@ -887,12 +879,13 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
887struct pci_bus *pci_create_root_bus(struct device *parent, int bus, 879struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
888 struct pci_ops *ops, void *sysdata, 880 struct pci_ops *ops, void *sysdata,
889 struct list_head *resources); 881 struct list_head *resources);
882int pci_host_probe(struct pci_host_bridge *bridge);
890int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); 883int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
891int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); 884int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
892void pci_bus_release_busn_res(struct pci_bus *b); 885void pci_bus_release_busn_res(struct pci_bus *b);
893struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 886struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
894 struct pci_ops *ops, void *sysdata, 887 struct pci_ops *ops, void *sysdata,
895 struct list_head *resources); 888 struct list_head *resources);
896int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); 889int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
897struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 890struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
898 int busnr); 891 int busnr);
@@ -949,10 +942,10 @@ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
949struct pci_bus *pci_find_next_bus(const struct pci_bus *from); 942struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
950 943
951struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, 944struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
952 struct pci_dev *from); 945 struct pci_dev *from);
953struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, 946struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
954 unsigned int ss_vendor, unsigned int ss_device, 947 unsigned int ss_vendor, unsigned int ss_device,
955 struct pci_dev *from); 948 struct pci_dev *from);
956struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); 949struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
957struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, 950struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
958 unsigned int devfn); 951 unsigned int devfn);
@@ -1028,7 +1021,7 @@ static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1028 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); 1021 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1029} 1022}
1030 1023
1031/* user-space driven config access */ 1024/* User-space driven config access */
1032int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 1025int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1033int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 1026int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1034int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); 1027int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
@@ -1170,7 +1163,7 @@ unsigned int pci_rescan_bus(struct pci_bus *bus);
1170void pci_lock_rescan_remove(void); 1163void pci_lock_rescan_remove(void);
1171void pci_unlock_rescan_remove(void); 1164void pci_unlock_rescan_remove(void);
1172 1165
1173/* Vital product data routines */ 1166/* Vital Product Data routines */
1174ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1167ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1175ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1168ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1176int pci_set_vpd_size(struct pci_dev *dev, size_t len); 1169int pci_set_vpd_size(struct pci_dev *dev, size_t len);
@@ -1255,9 +1248,7 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1255int __must_check __pci_register_driver(struct pci_driver *, struct module *, 1248int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1256 const char *mod_name); 1249 const char *mod_name);
1257 1250
1258/* 1251/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1259 * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
1260 */
1261#define pci_register_driver(driver) \ 1252#define pci_register_driver(driver) \
1262 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) 1253 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1263 1254
@@ -1272,8 +1263,7 @@ void pci_unregister_driver(struct pci_driver *dev);
1272 * use this macro once, and calling it replaces module_init() and module_exit() 1263 * use this macro once, and calling it replaces module_init() and module_exit()
1273 */ 1264 */
1274#define module_pci_driver(__pci_driver) \ 1265#define module_pci_driver(__pci_driver) \
1275 module_driver(__pci_driver, pci_register_driver, \ 1266 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1276 pci_unregister_driver)
1277 1267
1278/** 1268/**
1279 * builtin_pci_driver() - Helper macro for registering a PCI driver 1269 * builtin_pci_driver() - Helper macro for registering a PCI driver
@@ -1312,10 +1302,10 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
1312int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1302int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1313 unsigned int command_bits, u32 flags); 1303 unsigned int command_bits, u32 flags);
1314 1304
1315#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ 1305#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
1316#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ 1306#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1317#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ 1307#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1318#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ 1308#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
1319#define PCI_IRQ_ALL_TYPES \ 1309#define PCI_IRQ_ALL_TYPES \
1320 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) 1310 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1321 1311
@@ -1334,8 +1324,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1334#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 1324#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1335 1325
1336struct msix_entry { 1326struct msix_entry {
1337 u32 vector; /* kernel uses to write allocated vector */ 1327 u32 vector; /* Kernel uses to write allocated vector */
1338 u16 entry; /* driver uses to specify entry, OS writes */ 1328 u16 entry; /* Driver uses to specify entry, OS writes */
1339}; 1329};
1340 1330
1341#ifdef CONFIG_PCI_MSI 1331#ifdef CONFIG_PCI_MSI
@@ -1375,10 +1365,10 @@ static inline int pci_msi_enabled(void) { return 0; }
1375static inline int pci_enable_msi(struct pci_dev *dev) 1365static inline int pci_enable_msi(struct pci_dev *dev)
1376{ return -ENOSYS; } 1366{ return -ENOSYS; }
1377static inline int pci_enable_msix_range(struct pci_dev *dev, 1367static inline int pci_enable_msix_range(struct pci_dev *dev,
1378 struct msix_entry *entries, int minvec, int maxvec) 1368 struct msix_entry *entries, int minvec, int maxvec)
1379{ return -ENOSYS; } 1369{ return -ENOSYS; }
1380static inline int pci_enable_msix_exact(struct pci_dev *dev, 1370static inline int pci_enable_msix_exact(struct pci_dev *dev,
1381 struct msix_entry *entries, int nvec) 1371 struct msix_entry *entries, int nvec)
1382{ return -ENOSYS; } 1372{ return -ENOSYS; }
1383 1373
1384static inline int 1374static inline int
@@ -1543,9 +1533,9 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1543int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); 1533int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1544#endif 1534#endif
1545 1535
1546/* some architectures require additional setup to direct VGA traffic */ 1536/* Some architectures require additional setup to direct VGA traffic */
1547typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1537typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1548 unsigned int command_bits, u32 flags); 1538 unsigned int command_bits, u32 flags);
1549void pci_register_set_vga_state(arch_set_vga_state_t func); 1539void pci_register_set_vga_state(arch_set_vga_state_t func);
1550 1540
1551static inline int 1541static inline int
@@ -1584,10 +1574,9 @@ static inline void pci_clear_flags(int flags) { }
1584static inline int pci_has_flag(int flag) { return 0; } 1574static inline int pci_has_flag(int flag) { return 0; }
1585 1575
1586/* 1576/*
1587 * If the system does not have PCI, clearly these return errors. Define 1577 * If the system does not have PCI, clearly these return errors. Define
1588 * these as simple inline functions to avoid hair in drivers. 1578 * these as simple inline functions to avoid hair in drivers.
1589 */ 1579 */
1590
1591#define _PCI_NOP(o, s, t) \ 1580#define _PCI_NOP(o, s, t) \
1592 static inline int pci_##o##_config_##s(struct pci_dev *dev, \ 1581 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1593 int where, t val) \ 1582 int where, t val) \
@@ -1686,6 +1675,13 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1686#define dev_is_pf(d) (false) 1675#define dev_is_pf(d) (false)
1687static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 1676static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
1688{ return false; } 1677{ return false; }
1678static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1679 struct device_node *node,
1680 const u32 *intspec,
1681 unsigned int intsize,
1682 unsigned long *out_hwirq,
1683 unsigned int *out_type)
1684{ return -EINVAL; }
1689#endif /* CONFIG_PCI */ 1685#endif /* CONFIG_PCI */
1690 1686
1691/* Include architecture-dependent settings and functions */ 1687/* Include architecture-dependent settings and functions */
@@ -1726,8 +1722,10 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1726#define pci_root_bus_fwnode(bus) NULL 1722#define pci_root_bus_fwnode(bus) NULL
1727#endif 1723#endif
1728 1724
1729/* these helpers provide future and backwards compatibility 1725/*
1730 * for accessing popular PCI BAR info */ 1726 * These helpers provide future and backwards compatibility
1727 * for accessing popular PCI BAR info
1728 */
1731#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) 1729#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1732#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) 1730#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1733#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) 1731#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
@@ -1739,7 +1737,8 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1739 (pci_resource_end((dev), (bar)) - \ 1737 (pci_resource_end((dev), (bar)) - \
1740 pci_resource_start((dev), (bar)) + 1)) 1738 pci_resource_start((dev), (bar)) + 1))
1741 1739
1742/* Similar to the helpers above, these manipulate per-pci_dev 1740/*
1741 * Similar to the helpers above, these manipulate per-pci_dev
1743 * driver-specific data. They are really just a wrapper around 1742 * driver-specific data. They are really just a wrapper around
1744 * the generic device structure functions of these calls. 1743 * the generic device structure functions of these calls.
1745 */ 1744 */
@@ -1753,16 +1752,14 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1753 dev_set_drvdata(&pdev->dev, data); 1752 dev_set_drvdata(&pdev->dev, data);
1754} 1753}
1755 1754
1756/* If you want to know what to call your pci_dev, ask this function.
1757 * Again, it's a wrapper around the generic device.
1758 */
1759static inline const char *pci_name(const struct pci_dev *pdev) 1755static inline const char *pci_name(const struct pci_dev *pdev)
1760{ 1756{
1761 return dev_name(&pdev->dev); 1757 return dev_name(&pdev->dev);
1762} 1758}
1763 1759
1764 1760
1765/* Some archs don't want to expose struct resource to userland as-is 1761/*
1762 * Some archs don't want to expose struct resource to userland as-is
1766 * in sysfs and /proc 1763 * in sysfs and /proc
1767 */ 1764 */
1768#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER 1765#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
@@ -1781,16 +1778,16 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
1781 1778
1782 1779
1783/* 1780/*
1784 * The world is not perfect and supplies us with broken PCI devices. 1781 * The world is not perfect and supplies us with broken PCI devices.
1785 * For at least a part of these bugs we need a work-around, so both 1782 * For at least a part of these bugs we need a work-around, so both
1786 * generic (drivers/pci/quirks.c) and per-architecture code can define 1783 * generic (drivers/pci/quirks.c) and per-architecture code can define
1787 * fixup hooks to be called for particular buggy devices. 1784 * fixup hooks to be called for particular buggy devices.
1788 */ 1785 */
1789 1786
1790struct pci_fixup { 1787struct pci_fixup {
1791 u16 vendor; /* You can use PCI_ANY_ID here of course */ 1788 u16 vendor; /* Or PCI_ANY_ID */
1792 u16 device; /* You can use PCI_ANY_ID here of course */ 1789 u16 device; /* Or PCI_ANY_ID */
1793 u32 class; /* You can use PCI_ANY_ID here too */ 1790 u32 class; /* Or PCI_ANY_ID */
1794 unsigned int class_shift; /* should be 0, 8, 16 */ 1791 unsigned int class_shift; /* should be 0, 8, 16 */
1795 void (*hook)(struct pci_dev *dev); 1792 void (*hook)(struct pci_dev *dev);
1796}; 1793};
@@ -1832,23 +1829,19 @@ enum pci_fixup_pass {
1832#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1829#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1833 class_shift, hook) \ 1830 class_shift, hook) \
1834 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1831 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1835 resume##hook, vendor, device, class, \ 1832 resume##hook, vendor, device, class, class_shift, hook)
1836 class_shift, hook)
1837#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1833#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1838 class_shift, hook) \ 1834 class_shift, hook) \
1839 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1835 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1840 resume_early##hook, vendor, device, \ 1836 resume_early##hook, vendor, device, class, class_shift, hook)
1841 class, class_shift, hook)
1842#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1837#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1843 class_shift, hook) \ 1838 class_shift, hook) \
1844 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1839 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1845 suspend##hook, vendor, device, class, \ 1840 suspend##hook, vendor, device, class, class_shift, hook)
1846 class_shift, hook)
1847#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ 1841#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1848 class_shift, hook) \ 1842 class_shift, hook) \
1849 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 1843 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1850 suspend_late##hook, vendor, device, \ 1844 suspend_late##hook, vendor, device, class, class_shift, hook)
1851 class, class_shift, hook)
1852 1845
1853#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1846#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1854 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1847 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
@@ -1864,20 +1857,16 @@ enum pci_fixup_pass {
1864 hook, vendor, device, PCI_ANY_ID, 0, hook) 1857 hook, vendor, device, PCI_ANY_ID, 0, hook)
1865#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1858#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1866 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1859 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1867 resume##hook, vendor, device, \ 1860 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
1868 PCI_ANY_ID, 0, hook)
1869#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1861#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1870 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1862 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1871 resume_early##hook, vendor, device, \ 1863 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
1872 PCI_ANY_ID, 0, hook)
1873#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1864#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1874 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1865 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1875 suspend##hook, vendor, device, \ 1866 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
1876 PCI_ANY_ID, 0, hook)
1877#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ 1867#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1878 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 1868 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1879 suspend_late##hook, vendor, device, \ 1869 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
1880 PCI_ANY_ID, 0, hook)
1881 1870
1882#ifdef CONFIG_PCI_QUIRKS 1871#ifdef CONFIG_PCI_QUIRKS
1883void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1872void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -2061,6 +2050,7 @@ void pci_request_acs(void);
2061bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 2050bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2062bool pci_acs_path_enabled(struct pci_dev *start, 2051bool pci_acs_path_enabled(struct pci_dev *start,
2063 struct pci_dev *end, u16 acs_flags); 2052 struct pci_dev *end, u16 acs_flags);
2053int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2064 2054
2065#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 2055#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2066#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) 2056#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
@@ -2112,7 +2102,7 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
2112 */ 2102 */
2113static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) 2103static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
2114{ 2104{
2115 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); 2105 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
2116} 2106}
2117 2107
2118/** 2108/**
@@ -2182,6 +2172,9 @@ void pci_release_of_node(struct pci_dev *dev);
2182void pci_set_bus_of_node(struct pci_bus *bus); 2172void pci_set_bus_of_node(struct pci_bus *bus);
2183void pci_release_bus_of_node(struct pci_bus *bus); 2173void pci_release_bus_of_node(struct pci_bus *bus);
2184struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 2174struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2175int pci_parse_request_of_pci_ranges(struct device *dev,
2176 struct list_head *resources,
2177 struct resource **bus_range);
2185 2178
2186/* Arch may override this (weak) */ 2179/* Arch may override this (weak) */
2187struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 2180struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2197,7 +2190,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2197 return bus ? bus->dev.of_node : NULL; 2190 return bus ? bus->dev.of_node : NULL;
2198} 2191}
2199 2192
2200#else /* CONFIG_OF */ 2193#else /* CONFIG_OF */
2201static inline void pci_set_of_node(struct pci_dev *dev) { } 2194static inline void pci_set_of_node(struct pci_dev *dev) { }
2202static inline void pci_release_of_node(struct pci_dev *dev) { } 2195static inline void pci_release_of_node(struct pci_dev *dev) { }
2203static inline void pci_set_bus_of_node(struct pci_bus *bus) { } 2196static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
@@ -2206,6 +2199,12 @@ static inline struct device_node *
2206pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } 2199pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
2207static inline struct irq_domain * 2200static inline struct irq_domain *
2208pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 2201pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2202static inline int pci_parse_request_of_pci_ranges(struct device *dev,
2203 struct list_head *resources,
2204 struct resource **bus_range)
2205{
2206 return -EINVAL;
2207}
2209#endif /* CONFIG_OF */ 2208#endif /* CONFIG_OF */
2210 2209
2211#ifdef CONFIG_ACPI 2210#ifdef CONFIG_ACPI
@@ -2231,7 +2230,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
2231 int (*fn)(struct pci_dev *pdev, 2230 int (*fn)(struct pci_dev *pdev,
2232 u16 alias, void *data), void *data); 2231 u16 alias, void *data), void *data);
2233 2232
2234/* helper functions for operation of device flag */ 2233/* Helper functions for operation of device flag */
2235static inline void pci_set_dev_assigned(struct pci_dev *pdev) 2234static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2236{ 2235{
2237 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 2236 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
@@ -2278,7 +2277,19 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2278 return false; 2277 return false;
2279} 2278}
2280 2279
2281/* provide the legacy pci_dma_* API */ 2280/* Provide the legacy pci_dma_* API */
2282#include <linux/pci-dma-compat.h> 2281#include <linux/pci-dma-compat.h>
2283 2282
2283#define pci_printk(level, pdev, fmt, arg...) \
2284 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2285
2286#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2287#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2288#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2289#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2290#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2291#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2292#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2293#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2294
2284#endif /* LINUX_PCI_H */ 2295#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ab20dc5db423..eb13e84e1fef 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2381,6 +2381,8 @@
2381 2381
2382#define PCI_VENDOR_ID_LENOVO 0x17aa 2382#define PCI_VENDOR_ID_LENOVO 0x17aa
2383 2383
2384#define PCI_VENDOR_ID_CDNS 0x17cd
2385
2384#define PCI_VENDOR_ID_ARECA 0x17d3 2386#define PCI_VENDOR_ID_ARECA 0x17d3
2385#define PCI_DEVICE_ID_ARECA_1110 0x1110 2387#define PCI_DEVICE_ID_ARECA_1110 0x1110
2386#define PCI_DEVICE_ID_ARECA_1120 0x1120 2388#define PCI_DEVICE_ID_ARECA_1120 0x1120
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 09d73d0d1aa8..42d121174fe2 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -100,6 +100,9 @@ struct sw_event_regs {
100 u32 gpio_interrupt_hdr; 100 u32 gpio_interrupt_hdr;
101 u32 gpio_interrupt_data; 101 u32 gpio_interrupt_data;
102 u32 reserved16[4]; 102 u32 reserved16[4];
103 u32 gfms_event_hdr;
104 u32 gfms_event_data;
105 u32 reserved17[4];
103} __packed; 106} __packed;
104 107
105enum { 108enum {
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 70c2b2ade048..0c79eac5e9b8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -622,15 +622,19 @@
622 * safely. 622 * safely.
623 */ 623 */
624#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ 624#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
625#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
625#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */ 626#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
626#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ 627#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
627#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */ 628#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
629#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
630#define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */
628#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ 631#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
629#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ 632#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
630#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ 633#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
631#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ 634#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
632#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ 635#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
633#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ 636#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
637#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
634#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ 638#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
635#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ 639#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
636#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ 640#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
@@ -966,26 +970,28 @@
966 970
967/* Downstream Port Containment */ 971/* Downstream Port Containment */
968#define PCI_EXP_DPC_CAP 4 /* DPC Capability */ 972#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
969#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */ 973#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
970#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */ 974#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
971#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */ 975#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
972#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */ 976#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */
973#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0xF00 /* RP PIO log size */ 977#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
974#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ 978#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
975 979
976#define PCI_EXP_DPC_CTL 6 /* DPC control */ 980#define PCI_EXP_DPC_CTL 6 /* DPC control */
977#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */ 981#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
978#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */ 982#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
979 983
980#define PCI_EXP_DPC_STATUS 8 /* DPC Status */ 984#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
981#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ 985#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
982#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ 986#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
983#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */ 987#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
988#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
989#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
984 990
985#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ 991#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
986 992
987#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */ 993#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
988#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO MASK */ 994#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
989#define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */ 995#define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */
990#define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */ 996#define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */
991#define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */ 997#define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 75df44373034..4f4daf8db954 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -88,7 +88,8 @@ struct switchtec_ioctl_event_summary {
88#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26 88#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26
89#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27 89#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
90#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28 90#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
91#define SWITCHTEC_IOCTL_MAX_EVENTS 29 91#define SWITCHTEC_IOCTL_EVENT_GFMS 29
92#define SWITCHTEC_IOCTL_MAX_EVENTS 30
92 93
93#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1 94#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
94#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2 95#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
diff --git a/kernel/resource.c b/kernel/resource.c
index 54ba6de3757c..8c527d83ca76 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1022,6 +1022,7 @@ static void __init __reserve_region_with_split(struct resource *root,
1022 struct resource *conflict; 1022 struct resource *conflict;
1023 struct resource *res = alloc_resource(GFP_ATOMIC); 1023 struct resource *res = alloc_resource(GFP_ATOMIC);
1024 struct resource *next_res = NULL; 1024 struct resource *next_res = NULL;
1025 int type = resource_type(root);
1025 1026
1026 if (!res) 1027 if (!res)
1027 return; 1028 return;
@@ -1029,7 +1030,7 @@ static void __init __reserve_region_with_split(struct resource *root,
1029 res->name = name; 1030 res->name = name;
1030 res->start = start; 1031 res->start = start;
1031 res->end = end; 1032 res->end = end;
1032 res->flags = IORESOURCE_BUSY; 1033 res->flags = type | IORESOURCE_BUSY;
1033 res->desc = IORES_DESC_NONE; 1034 res->desc = IORES_DESC_NONE;
1034 1035
1035 while (1) { 1036 while (1) {
@@ -1064,7 +1065,7 @@ static void __init __reserve_region_with_split(struct resource *root,
1064 next_res->name = name; 1065 next_res->name = name;
1065 next_res->start = conflict->end + 1; 1066 next_res->start = conflict->end + 1;
1066 next_res->end = end; 1067 next_res->end = end;
1067 next_res->flags = IORESOURCE_BUSY; 1068 next_res->flags = type | IORESOURCE_BUSY;
1068 next_res->desc = IORES_DESC_NONE; 1069 next_res->desc = IORES_DESC_NONE;
1069 } 1070 }
1070 } else { 1071 } else {
@@ -1478,7 +1479,7 @@ void __devm_release_region(struct device *dev, struct resource *parent,
1478EXPORT_SYMBOL(__devm_release_region); 1479EXPORT_SYMBOL(__devm_release_region);
1479 1480
1480/* 1481/*
1481 * Called from init/main.c to reserve IO ports. 1482 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1482 */ 1483 */
1483#define MAXRESERVE 4 1484#define MAXRESERVE 4
1484static int __init reserve_setup(char *str) 1485static int __init reserve_setup(char *str)
@@ -1489,26 +1490,38 @@ static int __init reserve_setup(char *str)
1489 for (;;) { 1490 for (;;) {
1490 unsigned int io_start, io_num; 1491 unsigned int io_start, io_num;
1491 int x = reserved; 1492 int x = reserved;
1493 struct resource *parent;
1492 1494
1493 if (get_option (&str, &io_start) != 2) 1495 if (get_option(&str, &io_start) != 2)
1494 break; 1496 break;
1495 if (get_option (&str, &io_num) == 0) 1497 if (get_option(&str, &io_num) == 0)
1496 break; 1498 break;
1497 if (x < MAXRESERVE) { 1499 if (x < MAXRESERVE) {
1498 struct resource *res = reserve + x; 1500 struct resource *res = reserve + x;
1501
1502 /*
1503 * If the region starts below 0x10000, we assume it's
1504 * I/O port space; otherwise assume it's memory.
1505 */
1506 if (io_start < 0x10000) {
1507 res->flags = IORESOURCE_IO;
1508 parent = &ioport_resource;
1509 } else {
1510 res->flags = IORESOURCE_MEM;
1511 parent = &iomem_resource;
1512 }
1499 res->name = "reserved"; 1513 res->name = "reserved";
1500 res->start = io_start; 1514 res->start = io_start;
1501 res->end = io_start + io_num - 1; 1515 res->end = io_start + io_num - 1;
1502 res->flags = IORESOURCE_BUSY; 1516 res->flags |= IORESOURCE_BUSY;
1503 res->desc = IORES_DESC_NONE; 1517 res->desc = IORES_DESC_NONE;
1504 res->child = NULL; 1518 res->child = NULL;
1505 if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0) 1519 if (request_resource(parent, res) == 0)
1506 reserved = x+1; 1520 reserved = x+1;
1507 } 1521 }
1508 } 1522 }
1509 return 1; 1523 return 1;
1510} 1524}
1511
1512__setup("reserve=", reserve_setup); 1525__setup("reserve=", reserve_setup);
1513 1526
1514/* 1527/*